Merge trunk

Addressing Sandy's comments
This commit is contained in:
Salvatore Orlando
2011-06-23 15:00:59 +01:00
32 changed files with 829 additions and 477 deletions

View File

@@ -97,7 +97,6 @@ flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('gateway_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
@@ -1056,16 +1055,6 @@ class ImageCommands(object):
machine_images = {}
other_images = {}
directory = os.path.abspath(directory)
# NOTE(vish): If we're importing from the images path dir, attempt
# to move the files out of the way before importing
# so we aren't writing to the same directory. This
# may fail if the dir was a mountpoint.
if (FLAGS.image_service == 'nova.image.local.LocalImageService'
and directory == os.path.abspath(FLAGS.images_path)):
new_dir = "%s_bak" % directory
os.rename(directory, new_dir)
os.mkdir(directory)
directory = new_dir
for fn in glob.glob("%s/*/info.json" % directory):
try:
image_path = os.path.join(fn.rpartition('/')[0], 'image')
@@ -1082,6 +1071,70 @@ class ImageCommands(object):
self._convert_images(machine_images)
class AgentBuildCommands(object):
"""Class for managing agent builds."""
def create(self, os, architecture, version, url, md5hash,
hypervisor='xen'):
"""Creates a new agent build.
arguments: os architecture version url md5hash [hypervisor='xen']"""
ctxt = context.get_admin_context()
agent_build = db.agent_build_create(ctxt,
{'hypervisor': hypervisor,
'os': os,
'architecture': architecture,
'version': version,
'url': url,
'md5hash': md5hash})
def delete(self, os, architecture, hypervisor='xen'):
"""Deletes an existing agent build.
arguments: os architecture [hypervisor='xen']"""
ctxt = context.get_admin_context()
agent_build_ref = db.agent_build_get_by_triple(ctxt,
hypervisor, os, architecture)
db.agent_build_destroy(ctxt, agent_build_ref['id'])
def list(self, hypervisor=None):
"""Lists all agent builds.
arguments: <none>"""
fmt = "%-10s %-8s %12s %s"
ctxt = context.get_admin_context()
by_hypervisor = {}
for agent_build in db.agent_build_get_all(ctxt):
buildlist = by_hypervisor.get(agent_build.hypervisor)
if not buildlist:
buildlist = by_hypervisor[agent_build.hypervisor] = []
buildlist.append(agent_build)
for key, buildlist in by_hypervisor.iteritems():
if hypervisor and key != hypervisor:
continue
print "Hypervisor: %s" % key
print fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32)
for agent_build in buildlist:
print fmt % (agent_build.os, agent_build.architecture,
agent_build.version, agent_build.md5hash)
print ' %s' % agent_build.url
print
def modify(self, os, architecture, version, url, md5hash,
hypervisor='xen'):
"""Update an existing agent build.
arguments: os architecture version url md5hash [hypervisor='xen']
"""
ctxt = context.get_admin_context()
agent_build_ref = db.agent_build_get_by_triple(ctxt,
hypervisor, os, architecture)
db.agent_build_update(ctxt, agent_build_ref['id'],
{'version': version,
'url': url,
'md5hash': md5hash})
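Since nova-manage dispatches the `agent` category to this class (see the CATEGORIES list below), each public method above becomes a subcommand. A minimal sketch of driving the class directly, with placeholder argument values (illustrative only; it assumes a configured Nova database and admin credentials):
    # Illustrative only -- nova-manage normally invokes these methods as
    # 'nova-manage agent <create|delete|list|modify> ...'. The values are
    # placeholders, not a real agent build.
    commands = AgentBuildCommands()
    commands.create('windows', 'x86-64', '1.0.0',
                    'http://example.com/winagent-1.0.0.zip',
                    'd41d8cd98f00b204e9800998ecf8427e')
    commands.list()                   # prints builds grouped by hypervisor
    commands.modify('windows', 'x86-64', '1.0.1',
                    'http://example.com/winagent-1.0.1.zip',
                    '900150983cd24fb0d6963f7d28e17f72', hypervisor='xen')
    commands.delete('windows', 'x86-64')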
class ConfigCommands(object):
"""Class for exposing the flags defined by flag_file(s)."""
@@ -1094,6 +1147,7 @@ class ConfigCommands(object):
CATEGORIES = [
('account', AccountCommands),
('agent', AgentBuildCommands),
('config', ConfigCommands),
('db', DbCommands),
('fixed', FixedIpCommands),

View File

@@ -1,283 +0,0 @@
source/api/nova..adminclient.rst
source/api/nova..api.direct.rst
source/api/nova..api.ec2.admin.rst
source/api/nova..api.ec2.apirequest.rst
source/api/nova..api.ec2.cloud.rst
source/api/nova..api.ec2.metadatarequesthandler.rst
source/api/nova..api.openstack.auth.rst
source/api/nova..api.openstack.backup_schedules.rst
source/api/nova..api.openstack.common.rst
source/api/nova..api.openstack.consoles.rst
source/api/nova..api.openstack.faults.rst
source/api/nova..api.openstack.flavors.rst
source/api/nova..api.openstack.images.rst
source/api/nova..api.openstack.servers.rst
source/api/nova..api.openstack.shared_ip_groups.rst
source/api/nova..api.openstack.zones.rst
source/api/nova..auth.dbdriver.rst
source/api/nova..auth.fakeldap.rst
source/api/nova..auth.ldapdriver.rst
source/api/nova..auth.manager.rst
source/api/nova..auth.signer.rst
source/api/nova..cloudpipe.pipelib.rst
source/api/nova..compute.api.rst
source/api/nova..compute.instance_types.rst
source/api/nova..compute.manager.rst
source/api/nova..compute.monitor.rst
source/api/nova..compute.power_state.rst
source/api/nova..console.api.rst
source/api/nova..console.fake.rst
source/api/nova..console.manager.rst
source/api/nova..console.xvp.rst
source/api/nova..context.rst
source/api/nova..crypto.rst
source/api/nova..db.api.rst
source/api/nova..db.base.rst
source/api/nova..db.migration.rst
source/api/nova..db.sqlalchemy.api.rst
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
source/api/nova..db.sqlalchemy.migration.rst
source/api/nova..db.sqlalchemy.models.rst
source/api/nova..db.sqlalchemy.session.rst
source/api/nova..exception.rst
source/api/nova..fakememcache.rst
source/api/nova..fakerabbit.rst
source/api/nova..flags.rst
source/api/nova..image.glance.rst
source/api/nova..image.local.rst
source/api/nova..image.s3.rst
source/api/nova..image.service.rst
source/api/nova..log.rst
source/api/nova..manager.rst
source/api/nova..network.api.rst
source/api/nova..network.linux_net.rst
source/api/nova..network.manager.rst
source/api/nova..objectstore.bucket.rst
source/api/nova..objectstore.handler.rst
source/api/nova..objectstore.image.rst
source/api/nova..objectstore.stored.rst
source/api/nova..quota.rst
source/api/nova..rpc.rst
source/api/nova..scheduler.chance.rst
source/api/nova..scheduler.driver.rst
source/api/nova..scheduler.manager.rst
source/api/nova..scheduler.simple.rst
source/api/nova..scheduler.zone.rst
source/api/nova..service.rst
source/api/nova..test.rst
source/api/nova..tests.api.openstack.fakes.rst
source/api/nova..tests.api.openstack.test_adminapi.rst
source/api/nova..tests.api.openstack.test_api.rst
source/api/nova..tests.api.openstack.test_auth.rst
source/api/nova..tests.api.openstack.test_common.rst
source/api/nova..tests.api.openstack.test_faults.rst
source/api/nova..tests.api.openstack.test_flavors.rst
source/api/nova..tests.api.openstack.test_images.rst
source/api/nova..tests.api.openstack.test_ratelimiting.rst
source/api/nova..tests.api.openstack.test_servers.rst
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
source/api/nova..tests.api.openstack.test_zones.rst
source/api/nova..tests.api.test_wsgi.rst
source/api/nova..tests.db.fakes.rst
source/api/nova..tests.declare_flags.rst
source/api/nova..tests.fake_flags.rst
source/api/nova..tests.glance.stubs.rst
source/api/nova..tests.hyperv_unittest.rst
source/api/nova..tests.objectstore_unittest.rst
source/api/nova..tests.real_flags.rst
source/api/nova..tests.runtime_flags.rst
source/api/nova..tests.test_access.rst
source/api/nova..tests.test_api.rst
source/api/nova..tests.test_auth.rst
source/api/nova..tests.test_cloud.rst
source/api/nova..tests.test_compute.rst
source/api/nova..tests.test_console.rst
source/api/nova..tests.test_direct.rst
source/api/nova..tests.test_flags.rst
source/api/nova..tests.test_instance_types.rst
source/api/nova..tests.test_localization.rst
source/api/nova..tests.test_log.rst
source/api/nova..tests.test_middleware.rst
source/api/nova..tests.test_misc.rst
source/api/nova..tests.test_network.rst
source/api/nova..tests.test_quota.rst
source/api/nova..tests.test_rpc.rst
source/api/nova..tests.test_scheduler.rst
source/api/nova..tests.test_service.rst
source/api/nova..tests.test_test.rst
source/api/nova..tests.test_twistd.rst
source/api/nova..tests.test_utils.rst
source/api/nova..tests.test_virt.rst
source/api/nova..tests.test_volume.rst
source/api/nova..tests.test_xenapi.rst
source/api/nova..tests.xenapi.stubs.rst
source/api/nova..twistd.rst
source/api/nova..utils.rst
source/api/nova..version.rst
source/api/nova..virt.connection.rst
source/api/nova..virt.disk.rst
source/api/nova..virt.fake.rst
source/api/nova..virt.hyperv.rst
source/api/nova..virt.images.rst
source/api/nova..virt.libvirt_conn.rst
source/api/nova..virt.xenapi.fake.rst
source/api/nova..virt.xenapi.network_utils.rst
source/api/nova..virt.xenapi.vm_utils.rst
source/api/nova..virt.xenapi.vmops.rst
source/api/nova..virt.xenapi.volume_utils.rst
source/api/nova..virt.xenapi.volumeops.rst
source/api/nova..virt.xenapi_conn.rst
source/api/nova..volume.api.rst
source/api/nova..volume.driver.rst
source/api/nova..volume.manager.rst
source/api/nova..volume.san.rst
source/api/nova..wsgi.rst
source/api/autoindex.rst

View File

@@ -14,10 +14,16 @@
License for the specific language governing permissions and limitations
under the License.
Source for illustrations in doc/source/image_src/zone_distsched_illustrations.odp
(OpenOffice Impress format) Illustrations are "exported" to png and then scaled
to 400x300 or 640x480 as needed and placed in the doc/source/images directory.
Distributed Scheduler
=====
=====================
The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
.. image:: /images/dating_service.png
But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understands how to pass instance requests from Zone to Zone.
@@ -25,75 +31,87 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab
So, how does this all work?
This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the Zones documentation before reading this.
This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the :doc:`devguide/zones` documentation before reading this.
.. image:: /images/zone_aware_scheduler.png
Costs & Weights
----------
When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to putting a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
---------------
When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on them that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost.
An example of some other costs might include selecting:
* a GPU-based host over a standard CPU
* a host with fast ethernet over a 10mbps line
* a host that can run Windows instances
* a host in the EU vs North America
* etc
* a GPU-based host over a standard CPU
* a host with fast ethernet over a 10mbps line
* a host that can run Windows instances
* a host in the EU vs North America
* etc
This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly.
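A toy version of that arithmetic (the function and field names here are made up for illustration; they are not the actual Nova cost functions)::

    # Each cost function scores a host against the request; the Weight is
    # just the sum of the costs, and lower is better.
    def ram_cost(host, request):
        return abs(host['free_ram_mb'] - request['memory_mb'])

    def owner_affinity_cost(host, request):
        # Prefer hosts with none of this user's instances on them.
        return 100 if request['user_id'] in host['instance_owners'] else 0

    def weigh(hosts, request, cost_fns=(ram_cost, owner_affinity_cost)):
        weights = [(sum(fn(host, request) for fn in cost_fns), host['name'])
                   for host in hosts]
        return sorted(weights)   # cheapest (best) host first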
.. image:: /images/costs_weights.png
nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler
-----------
------------------------------------------------------
As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions.
Here is how it works:
1. The compute nodes are filtered and the nodes remaining are weighed.
1a. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
1b. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
2. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
3. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
4. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
1. The compute nodes are filtered and the nodes remaining are weighed.
2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
3. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
4. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
5. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
6. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
.. image:: /images/zone_aware_overview.png
`ZoneAwareScheduler` by itself is not capable of handling all the provisioning. Derived classes are used to select which host filtering and weighing strategy will be used.
Filtering and Weighing
------------
----------------------
The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible.
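In outline, the Zone-local half of the flow is just filter-then-weigh with pluggable pieces (a sketch; the real class also fans the request out to child Zones and assembles the final build plan)::

    # Drop incapable hosts, then weigh the survivors. weigh_fn returns
    # [{'weight': w, 'hostname': h}, ...] and lower weight wins; sorting
    # of the merged results is done by the base class.
    def schedule_locally(hosts, request_spec, filter_fn, weigh_fn):
        candidates = [h for h in hosts if filter_fn(h, request_spec)]
        return sorted(weigh_fn(candidates, request_spec),
                      key=lambda d: d['weight'])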
.. image:: /images/filtering.png
Requesting a new instance
------------
-------------------------
Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.
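For reference, an `InstanceType` is essentially a record of sizing fields that the scheduler compares against host capabilities; a sketch (field values are illustrative)::

    # Roughly what an InstanceType row carries; the scheduler filters and
    # weighs hosts against these numbers.
    m1_small = {'name': 'm1.small', 'memory_mb': 2048, 'vcpus': 1,
                'local_gb': 20, 'flavorid': 2}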
`nova.compute.api.create()` performed the following actions:
1. it validated all the fields passed into it.
2. it created an entry in the `Instance` table for each instance requested
3. it put one `run_instance` message in the scheduler queue for each instance requested
4. the schedulers picked off the messages and decided which compute node should handle the request.
5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`s are valid.
1. it validated all the fields passed into it.
2. it created an entry in the `Instance` table for each instance requested
3. it put one `run_instance` message in the scheduler queue for each instance requested
4. the schedulers picked off the messages and decided which compute node should handle the request.
5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_ids` are valid.
.. image:: /images/nova.compute.api.create.png
Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.
The problem with this approach is that each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possibility of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.
For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
1. it validates all the fields passed into it.
2. it creates a single `reservation_id` for all of the instances created. This is a UUID.
3. it creates a single `run_instance` request in the scheduler queue
4. a scheduler picks the message off the queue and works on it.
5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
1. it validates all the fields passed into it.
2. it creates a single `reservation_id` for all of the instances created. This is a UUID.
3. it creates a single `run_instance` request in the scheduler queue
4. a scheduler picks the message off the queue and works on it.
5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
.. image:: /images/nova.compute.api.create_all_at_once.png
The Catch
-------------
---------
This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons, Zones should never leak information about their internal architectural layout. That means a Zone cannot leak information about hostnames or service IP addresses outside of its world.
When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back to the parent Zone. If the parent Zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`.
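The shape of that trick in miniature (a sketch only: `Fernet` is a stand-in cipher, not Nova's actual scheme, and key handling via `--build_plan_encryption_key` is elided)::

    # Illustrative only. The child Zone seals its chosen host into an
    # opaque blob; a later POST /servers carrying the blob lets a Zone
    # with the same key recover the choice without storing anything.
    import json
    from cryptography.fernet import Fernet

    key = Fernet.generate_key()   # stands in for the zone's shared key

    def make_blob(hostname):
        return Fernet(key).encrypt(json.dumps({'host': hostname}).encode())

    def read_blob(blob):
        return json.loads(Fernet(key).decrypt(blob))['host']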
@@ -108,7 +126,7 @@ NOTE: The features described in this section are related to the up-coming 'merge
The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.
NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches.
@@ -117,7 +135,7 @@ Finally, we need to give the user a way to get information on each of the instan
`python-novaclient` will be extended to support both of these changes.
Host Filter
--------------
-----------
As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To facilitate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
@@ -130,21 +148,22 @@ The filter used is determined by the `--default_host_filter` flag, which points
To create your own `HostFilter` you simply derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts`, which must return a list of host tuples, one for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call, as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be.
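A minimal custom filter along those lines might look like the sketch below (the `ZoneManager` attribute and capability names are assumptions for illustration)::

    from nova.scheduler import host_filter

    class EnoughRamFilter(host_filter.HostFilter):
        """Keep only hosts advertising at least the requested RAM."""

        def instance_type_to_filter(self, instance_type):
            # Reduce the InstanceType to the one number we filter on.
            return instance_type['memory_mb']

        def filter_hosts(self, zone_manager, query):
            min_ram = query
            # (hostname, additional-data) tuples are the contract; the
            # service_states layout and capability field are illustrative.
            return [(host, caps) for host, caps
                    in zone_manager.service_states.iteritems()
                    if caps.get('host_memory_free', 0) >= min_ram]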
Cost Scheduler Weighing
--------------
-----------------------
Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname`, where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted; this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled.
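Per that contract, the trivial weighing used by the simple scheduler described next reduces to something like this (the exact method signature is an assumption)::

    # Every host gets the same weight, so placement is effectively
    # first-come; the dict shape is what the base class expects.
    def weigh_hosts(self, request_spec, hosts):
        return [dict(weight=1, hostname=hostname) for hostname in hosts]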
Simple Zone Aware Scheduling
--------------
The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
----------------------------
The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and the `weigh_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
The `--scheduler_driver` flag is how you specify the scheduler class name.
Flags
--------------
-----
All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file:
::
--allow_admin_api=true
--enable_zone_routing=true
--zone_name=zone1
@@ -162,6 +181,7 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf
Some optional flags which are handy for debugging are:
::
--connection_type=fake
--verbose

View File

@@ -21,7 +21,7 @@ A Nova deployment is called a Zone. A Zone allows you to partition your deployme
The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion.
Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones.
Zones only know about their immediate children; they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones and, in those cases, the grandchild's internal structure would not be known to the grandparent.
Zones share nothing. They communicate via the public OpenStack API only. No database, queue, user or project definition is shared between Zones.
@@ -99,7 +99,7 @@ You can get the `child zone api url`, `nova api key` and `username` from the `no
export NOVA_URL="http://192.168.2.120:8774/v1.0/"
This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.
This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done with this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.
Getting a list of child Zones
-----------------------------

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 49 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 61 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 55 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 KiB

View File

@@ -585,3 +585,7 @@ class InstanceExists(Duplicate):
class MigrationError(NovaException):
message = _("Migration error") + ": %(reason)s"
class MalformedRequestBody(NovaException):
message = _("Malformed message body: %(reason)s")

View File

@@ -272,7 +272,7 @@ DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
DEFINE_list('glance_api_servers',
['127.0.0.1:9292'],
['%s:9292' % _get_my_ip()],
'list of glance api servers available to nova (host:port)')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
@@ -364,7 +364,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.local.LocalImageService',
DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),

View File

@@ -24,6 +24,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from eventlet import greenpool
@@ -106,12 +107,14 @@ def _wrap_method(function, self):
def _process(func, zone):
"""Worker stub for green thread pool. Give the worker
an authenticated nova client and zone info."""
nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
nova = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
nova.authenticate()
return func(nova, zone)
def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
def call_zone_method(context, method_name, errors_to_ignore=None,
novaclient_collection_name='zones', *args, **kwargs):
"""Returns a list of (zone, call_result) objects."""
if not isinstance(errors_to_ignore, (list, tuple)):
# This will also handle the default None
@@ -121,7 +124,7 @@ def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
results = []
for zone in db.zone_get_all(context):
try:
nova = novaclient.OpenStack(zone.username, zone.password,
nova = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
nova.authenticate()
except novaclient.exceptions.BadRequest, e:
@@ -131,18 +134,16 @@ def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
#TODO (dabo) - add logic for failure counts per zone,
# with escalation after a given number of failures.
continue
zone_method = getattr(nova.zones, method)
novaclient_collection = getattr(nova, novaclient_collection_name)
collection_method = getattr(novaclient_collection, method_name)
def _error_trap(*args, **kwargs):
try:
return zone_method(*args, **kwargs)
return collection_method(*args, **kwargs)
except Exception as e:
if type(e) in errors_to_ignore:
return None
# TODO (dabo) - want to be able to re-raise here.
# Returning a string now; raising was causing issues.
# raise e
return "ERROR", "%s" % e
raise
res = pool.spawn(_error_trap, *args, **kwargs)
results.append((zone, res))
@@ -201,38 +202,78 @@ class RedirectResult(exception.Error):
class reroute_compute(object):
"""Decorator used to indicate that the method should
delegate the call the child zones if the db query
can't find anything."""
"""
reroute_compute is responsible for trying to lookup a resource in the
current zone and if it's not found there, delegating the call to the
child zones.
Since reroute_compute will be making 'cross-zone' calls, the ID for the
object must come in as a UUID; if we receive an integer ID, we bail.
The steps involved are:
1. Validate that item_id is UUID-like
2. Lookup item by UUID in the zone local database
3. If the item was found, then extract integer ID, and pass that to
the wrapped method. (This ensures that zone-local code can
continue to use integer IDs).
4. If the item was not found, we delegate the call to a child zone
using the UUID.
"""
def __init__(self, method_name):
self.method_name = method_name
def _route_to_child_zones(self, context, collection, item_uuid):
if not FLAGS.enable_zone_routing:
raise exception.InstanceNotFound(instance_id=item_uuid)
zones = db.zone_get_all(context)
if not zones:
raise exception.InstanceNotFound(instance_id=item_uuid)
# Ask the children to provide an answer ...
LOG.debug(_("Asking child zones ..."))
result = self._call_child_zones(zones,
wrap_novaclient_function(_issue_novaclient_command,
collection, self.method_name, item_uuid))
# Scrub the results and raise another exception
# so the API layers can bail out gracefully ...
raise RedirectResult(self.unmarshall_result(result))
def __call__(self, f):
def wrapped_f(*args, **kwargs):
collection, context, item_id = \
collection, context, item_id_or_uuid = \
self.get_collection_context_and_id(args, kwargs)
try:
# Call the original function ...
attempt_reroute = False
if utils.is_uuid_like(item_id_or_uuid):
item_uuid = item_id_or_uuid
try:
instance = db.instance_get_by_uuid(context, item_uuid)
except exception.InstanceNotFound, e:
# NOTE(sirp): since a UUID was passed in, we can attempt
# to reroute to a child zone
attempt_reroute = True
LOG.debug(_("Instance %(item_uuid)s not found "
"locally: '%(e)s'" % locals()))
else:
# NOTE(sirp): since we're not re-routing in this case, and
# we were passed a UUID, we need to replace that UUID
# with an integer ID in the argument list so that the
# zone-local code can continue to use integer IDs.
item_id = instance['id']
args = list(args) # needs to be mutable to replace
self.replace_uuid_with_id(args, kwargs, item_id)
if attempt_reroute:
return self._route_to_child_zones(context, collection,
item_uuid)
else:
return f(*args, **kwargs)
except exception.InstanceNotFound, e:
LOG.debug(_("Instance %(item_id)s not found "
"locally: '%(e)s'" % locals()))
if not FLAGS.enable_zone_routing:
raise
zones = db.zone_get_all(context)
if not zones:
raise
# Ask the children to provide an answer ...
LOG.debug(_("Asking child zones ..."))
result = self._call_child_zones(zones,
wrap_novaclient_function(_issue_novaclient_command,
collection, self.method_name, item_id))
# Scrub the results and raise another exception
# so the API layers can bail out gracefully ...
raise RedirectResult(self.unmarshall_result(result))
return wrapped_f
def _call_child_zones(self, zones, function):
@@ -251,6 +292,18 @@ class reroute_compute(object):
instance_id = args[2]
return ("servers", context, instance_id)
@staticmethod
def replace_uuid_with_id(args, kwargs, replacement_id):
"""
Extracts the UUID parameter from the arg or kwarg list and replaces
it with an integer ID.
"""
if 'instance_id' in kwargs:
kwargs['instance_id'] = replacement_id
elif len(args) > 2:
args.pop(2)
args.insert(2, replacement_id)
def unmarshall_result(self, zone_responses):
"""Result is a list of responses from each child zone.
Each decorator derivation is responsible to turning this

View File

@@ -89,8 +89,8 @@ class SchedulerManager(manager.Manager):
host = getattr(self.driver, driver_method)(elevated, *args,
**kwargs)
except AttributeError, e:
LOG.exception(_("Driver Method %(driver_method)s missing: %(e)s")
% locals())
LOG.warning(_("Driver Method %(driver_method)s missing: %(e)s."
"Reverting to schedule()") % locals())
host = self.driver.schedule(elevated, topic, *args, **kwargs)
if not host:

View File

@@ -39,7 +39,7 @@ flags.DEFINE_integer("max_networks", 1000,
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
if (instance_ref['availability_zone']
@@ -75,6 +75,12 @@ class SimpleScheduler(chance.ChanceScheduler):
" for this request. Is the appropriate"
" service running?"))
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)

View File

@@ -88,9 +88,10 @@ class ZoneAwareScheduler(driver.Scheduler):
instance_properties = request_spec['instance_properties']
name = instance_properties['display_name']
image_id = instance_properties['image_id']
image_ref = instance_properties['image_ref']
meta = instance_properties['metadata']
flavor_id = instance_type['flavorid']
reservation_id = instance_properties['reservation_id']
files = kwargs['injected_files']
ipgroup = None # Not supported in OS API ... yet
@@ -99,18 +100,20 @@ class ZoneAwareScheduler(driver.Scheduler):
child_blob = zone_info['child_blob']
zone = db.zone_get(context, child_zone)
url = zone.api_url
LOG.debug(_("Forwarding instance create call to child zone %(url)s")
LOG.debug(_("Forwarding instance create call to child zone %(url)s"
". ReservationID=%(reservation_id)s")
% locals())
nova = None
try:
nova = novaclient.OpenStack(zone.username, zone.password, url)
nova = novaclient.OpenStack(zone.username, zone.password, None,
url)
nova.authenticate()
except novaclient.exceptions.BadRequest, e:
raise exception.NotAuthorized(_("Bad credentials attempting "
"to talk to zone at %(url)s.") % locals())
nova.servers.create(name, image_id, flavor_id, ipgroup, meta, files,
child_blob)
nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
child_blob, reservation_id=reservation_id)
def _provision_resource_from_blob(self, context, item, instance_id,
request_spec, kwargs):
@@ -182,7 +185,11 @@ class ZoneAwareScheduler(driver.Scheduler):
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))
for item in build_plan:
for num in xrange(request_spec['num_instances']):
if not build_plan:
break
item = build_plan.pop(0)
self._provision_resource(context, item, instance_id, request_spec,
kwargs)

View File

@@ -89,7 +89,8 @@ class ZoneState(object):
def _call_novaclient(zone):
"""Call novaclient. Broken out for testing purposes."""
client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
client = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
return client.zones.info()._info

View File

@@ -32,7 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS['network_size'].SetDefault(8)
FLAGS['num_networks'].SetDefault(2)
FLAGS['fake_network'].SetDefault(True)
FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService')
FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService')
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')

View File

@@ -133,11 +133,11 @@ class HostFilterTestCase(test.TestCase):
raw = ['or',
['and',
['<', '$compute.host_memory_free', 30],
['<', '$compute.disk_available', 300]
['<', '$compute.disk_available', 300],
],
['and',
['>', '$compute.host_memory_free', 70],
['>', '$compute.disk_available', 700]
['>', '$compute.disk_available', 700],
]
]
cooked = json.dumps(raw)
@@ -183,12 +183,12 @@ class HostFilterTestCase(test.TestCase):
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
['not', True, False, True, False]
['not', True, False, True, False],
)))
try:
hf.filter_hosts(self.zone_manager, json.dumps(
'not', True, False, True, False
'not', True, False, True, False,
))
self.fail("Should give KeyError")
except KeyError, e:

View File

@@ -44,7 +44,7 @@ class WeightedSumTestCase(test.TestCase):
hosts = [
FakeHost(1, 512 * MB, 100),
FakeHost(2, 256 * MB, 400),
FakeHost(3, 512 * MB, 100)
FakeHost(3, 512 * MB, 100),
]
weighted_fns = [
@@ -96,7 +96,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
def test_noop_cost_fn(self):
FLAGS.least_cost_scheduler_cost_functions = [
'nova.scheduler.least_cost.noop_cost_fn'
'nova.scheduler.least_cost.noop_cost_fn',
]
FLAGS.noop_cost_fn_weight = 1
@@ -110,7 +110,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
def test_cost_fn_weights(self):
FLAGS.least_cost_scheduler_cost_functions = [
'nova.scheduler.least_cost.noop_cost_fn'
'nova.scheduler.least_cost.noop_cost_fn',
]
FLAGS.noop_cost_fn_weight = 2
@@ -124,7 +124,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
def test_fill_first_cost_fn(self):
FLAGS.least_cost_scheduler_cost_functions = [
'nova.scheduler.least_cost.fill_first_cost_fn'
'nova.scheduler.least_cost.fill_first_cost_fn',
]
FLAGS.fill_first_cost_fn_weight = 1

View File

@@ -48,6 +48,10 @@ flags.DECLARE('stub_network', 'nova.compute.manager')
flags.DECLARE('instances_path', 'nova.compute.manager')
FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class TestDriver(driver.Scheduler):
"""Scheduler Driver for Tests"""
def schedule(self, context, topic, *args, **kwargs):
@@ -926,12 +930,23 @@ def zone_get_all(context):
]
def fake_instance_get_by_uuid(context, uuid):
if uuid == FAKE_UUID_NOT_FOUND:
raise exception.InstanceNotFound(instance_id=uuid)
else:
return {'id': 1}
class FakeRerouteCompute(api.reroute_compute):
def __init__(self, method_name, id_to_return=1):
super(FakeRerouteCompute, self).__init__(method_name)
self.id_to_return = id_to_return
def _call_child_zones(self, zones, function):
return []
def get_collection_context_and_id(self, args, kwargs):
return ("servers", None, 1)
return ("servers", None, self.id_to_return)
def unmarshall_result(self, zone_responses):
return dict(magic="found me")
@@ -960,6 +975,8 @@ class ZoneRedirectTest(test.TestCase):
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(db, 'zone_get_all', zone_get_all)
self.stubs.Set(db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
self.enable_zone_routing = FLAGS.enable_zone_routing
FLAGS.enable_zone_routing = True
@@ -976,8 +993,19 @@ class ZoneRedirectTest(test.TestCase):
except api.RedirectResult, e:
self.fail(_("Successful database hit should succeed"))
def test_trap_not_found_locally(self):
def test_trap_not_found_locally_id_passed(self):
"""When an integer ID is not found locally, we cannot reroute to
another zone, so we just raise InstanceNotFound
"""
decorator = FakeRerouteCompute("foo")
self.assertRaises(exception.InstanceNotFound,
decorator(go_boom), None, None, 1)
def test_trap_not_found_locally_uuid_passed(self):
"""When a UUID is found, if the item isn't found locally, we should
try to reroute to a child zone to see if they have it
"""
decorator = FakeRerouteCompute("foo", id_to_return=FAKE_UUID_NOT_FOUND)
try:
result = decorator(go_boom)(None, None, 1)
self.fail(_("Should have rerouted."))
@@ -1110,10 +1138,4 @@ class CallZoneMethodTest(test.TestCase):
def test_call_zone_method_generates_exception(self):
context = {}
method = 'raises_exception'
results = api.call_zone_method(context, method)
# FIXME(sirp): for now the _error_trap code is catching errors and
# converting them to a ("ERROR", "string") tuples. The code (and this
# test) should eventually handle real exceptions.
expected = [(1, ('ERROR', 'testing'))]
self.assertEqual(expected, results)
self.assertRaises(Exception, api.call_zone_method, context, method)

View File

@@ -201,7 +201,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
'instance_properties': {},
'instance_type': {},
'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
'blob': "Non-None blob data"
'blob': "Non-None blob data",
}
result = sched.schedule_run_instance(None, 1, request_spec)

View File

@@ -89,7 +89,7 @@ class FakeHttplibConnection(object):
class XmlConversionTestCase(test.TestCase):
"""Unit test api xml conversion"""
def test_number_conversion(self):
conv = apirequest._try_convert
conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True)
self.assertEqual(conv('False'), False)

View File

@@ -35,7 +35,7 @@ from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.image import local
from nova.image import fake
FLAGS = flags.FLAGS
@@ -56,6 +56,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
@@ -69,8 +70,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine', 'image_state': 'available'}}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
# NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
rpc_cast = rpc.cast
@@ -303,7 +304,7 @@ class CloudTestCase(test.TestCase):
def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id')
self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all
result1 = describe_images(self.context)
result1 = result1['imagesSet'][0]
@@ -317,8 +318,8 @@ class CloudTestCase(test.TestCase):
self.assertEqual(2, len(result3['imagesSet']))
# provide a non-existing image_id
self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none)
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
@@ -329,8 +330,8 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': True}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
@@ -345,9 +346,9 @@ class CloudTestCase(test.TestCase):
def fake_update(meh, context, image_id, metadata, data=None):
return metadata
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
self.stubs.Set(local.LocalImageService, 'update', fake_update)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
@@ -359,7 +360,7 @@ class CloudTestCase(test.TestCase):
def fake_delete(self, context, id):
return None
self.stubs.Set(local.LocalImageService, 'delete', fake_delete)
self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
# valid image
result = deregister_image(self.context, 'ami-00000001')
self.assertEqual(result['imageId'], 'ami-00000001')
@@ -369,18 +370,25 @@ class CloudTestCase(test.TestCase):
def fake_detail_empty(self, context):
return []
self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
self.assertRaises(exception.ImageNotFound, deregister_image,
self.context, 'ami-bad001')
def test_console_output(self):
instance_type = FLAGS.default_instance_type
max_count = 1
kwargs = {'image_id': 'ami-1',
'instance_type': instance_type,
'max_count': max_count}
def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
def _run_instance_wait(self, **kwargs):
ec2_instance_id = self._run_instance(**kwargs)
self._wait_for_running(ec2_instance_id)
return ec2_instance_id
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
instance_type=FLAGS.default_instance_type,
max_count=1)
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
@@ -389,9 +397,7 @@ class CloudTestCase(test.TestCase):
rv = self.cloud.terminate_instances(self.context, [instance_id])
def test_ajax_console(self):
kwargs = {'image_id': 'ami-1'}
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
instance_id = self._run_instance(image_id='ami-1')
output = self.cloud.get_ajax_console(context=self.context,
instance_id=[instance_id])
self.assertEquals(output['url'],
@@ -457,6 +463,12 @@ class CloudTestCase(test.TestCase):
self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
# stub out the rpc call
def stub_cast(*args, **kwargs):
pass
self.stubs.Set(rpc, 'cast', stub_cast)
kwargs = {'image_id': FLAGS.default_image,
'instance_type': FLAGS.default_instance_type,
'max_count': 1}
@@ -466,7 +478,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(instance['imageId'], 'ami-00000001')
self.assertEqual(instance['displayName'], 'Server 1')
self.assertEqual(instance['instanceId'], 'i-00000001')
self.assertEqual(instance['instanceState']['name'], 'networking')
self.assertEqual(instance['instanceState']['name'], 'scheduling')
self.assertEqual(instance['instanceType'], 'm1.small')
def test_run_instances_image_state_none(self):
@@ -480,7 +492,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine'}}
self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state)
self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs)
@@ -495,7 +507,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine', 'image_state': 'decrypting'}}
self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt)
self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
self.assertRaises(exception.ApiError, run_instances,
self.context, **kwargs)
@@ -509,7 +521,7 @@ class CloudTestCase(test.TestCase):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'status': 'active'}
self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active)
self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
@@ -538,7 +550,9 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_wont_update_private_fields(self):
inst = db.instance_create(self.context, {})
self.cloud.update_instance(self.context, inst['id'],
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3',
mac_address='DE:AD:BE:EF')
inst = db.instance_get(self.context, inst['id'])
self.assertEqual(None, inst['mac_address'])
@@ -561,3 +575,299 @@ class CloudTestCase(test.TestCase):
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
db.volume_destroy(self.context, vol['id'])
def _restart_compute_service(self, periodic_interval=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
if periodic_interval:
self.compute = self.start_service(
'compute', periodic_interval=periodic_interval)
else:
self.compute = self.start_service('compute')
def _wait_for_state(self, ctxt, instance_id, predicate):
"""Wait for an stopping instance to be a given state"""
id = ec2utils.ec2_id_to_id(instance_id)
while True:
info = self.cloud.compute_api.get(context=ctxt, instance_id=id)
LOG.debug(info)
if predicate(info):
break
greenthread.sleep(1)
def _wait_for_running(self, instance_id):
def is_running(info):
return info['state_description'] == 'running'
self._wait_for_state(self.context, instance_id, is_running)
def _wait_for_stopped(self, instance_id):
def is_stopped(info):
return info['state_description'] == 'stopped'
self._wait_for_state(self.context, instance_id, is_stopped)
def _wait_for_terminate(self, instance_id):
def is_deleted(info):
return info['deleted']
elevated = self.context.elevated(read_deleted=True)
self._wait_for_state(elevated, instance_id, is_deleted)
def test_stop_start_instance(self):
"""Makes sure stop/start instance works"""
# force periodic tasks to run at a short interval to avoid a 60s wait.
self._restart_compute_service(periodic_interval=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance_wait(**kwargs)
# a running instance can't be started; the request is simply ignored.
result = self.cloud.start_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
result = self.cloud.stop_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_stopped(instance_id)
result = self.cloud.start_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_running(instance_id)
result = self.cloud.stop_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._wait_for_stopped(instance_id)
result = self.cloud.terminate_instances(self.context, [instance_id])
greenthread.sleep(0.3)
self.assertTrue(result)
self._restart_compute_service()
def _volume_create(self):
kwargs = {'status': 'available',
'host': self.volume.host,
'size': 1,
'attach_status': 'detached', }
return db.volume_create(self.context, kwargs)
def _assert_volume_attached(self, vol, instance_id, mountpoint):
self.assertEqual(vol['instance_id'], instance_id)
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
def _assert_volume_detached(self, vol):
self.assertEqual(vol['instance_id'], None)
self.assertEqual(vol['mountpoint'], None)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
def test_stop_start_with_volume(self):
"""Make sure run instance with block device mapping works"""
# force periodic tasks to run at a short interval to avoid a 60s wait.
self._restart_compute_service(periodic_interval=0.3)
vol1 = self._volume_create()
vol2 = self._volume_create()
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'volume_id': vol1['id'],
'delete_on_termination': False, },
{'device_name': '/dev/vdc',
'volume_id': vol2['id'],
'delete_on_termination': True, },
]}
ec2_instance_id = self._run_instance_wait(**kwargs)
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
self._wait_for_stopped(ec2_instance_id)
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_detached(vol)
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_detached(vol)
self.cloud.start_instances(self.context, [ec2_instance_id])
self._wait_for_running(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id'])
self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
vol['mountpoint'] == '/dev/vdc')
self.assertEqual(vol['instance_id'], instance_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.cloud.terminate_instances(self.context, [ec2_instance_id])
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=False)
vol = db.volume_get(admin_ctxt, vol1['id'])
self.assertFalse(vol['deleted'])
db.volume_destroy(self.context, vol1['id'])
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=True)
vol = db.volume_get(admin_ctxt, vol2['id'])
self.assertTrue(vol['deleted'])
self._restart_compute_service()
def test_stop_with_attached_volume(self):
"""Make sure attach info is reflected to block device mapping"""
# force periodic tasks to run at a short interval to avoid a 60s wait.
self._restart_compute_service(periodic_interval=0.3)
vol1 = self._volume_create()
vol2 = self._volume_create()
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'volume_id': vol1['id'],
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance_wait(**kwargs)
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 1)
for vol in vols:
self.assertEqual(vol['id'], vol1['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdb')
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_detached(vol)
self.cloud.compute_api.attach_volume(self.context,
instance_id=instance_id,
volume_id=vol2['id'],
device='/dev/vdc')
greenthread.sleep(0.3)
vol = db.volume_get(self.context, vol2['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
self.cloud.compute_api.detach_volume(self.context,
volume_id=vol1['id'])
greenthread.sleep(0.3)
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_detached(vol)
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
self._wait_for_stopped(ec2_instance_id)
for vol_id in (vol1['id'], vol2['id']):
vol = db.volume_get(self.context, vol_id)
self._assert_volume_detached(vol)
self.cloud.start_instances(self.context, [ec2_instance_id])
self._wait_for_running(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 1)
for vol in vols:
self.assertEqual(vol['id'], vol2['id'])
self._assert_volume_attached(vol, instance_id, '/dev/vdc')
vol = db.volume_get(self.context, vol1['id'])
self._assert_volume_detached(vol)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
greenthread.sleep(0.3)
for vol_id in (vol1['id'], vol2['id']):
vol = db.volume_get(self.context, vol_id)
self.assertEqual(vol['id'], vol_id)
self._assert_volume_detached(vol)
db.volume_destroy(self.context, vol_id)
self._restart_compute_service()
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
greenthread.sleep(0.3)
return result['snapshotId']
def test_run_with_snapshot(self):
"""Makes sure run/stop/start instance with snapshot works."""
vol = self._volume_create()
ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
ec2_snapshot1_id = self._create_snapshot(ec2_volume_id)
snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id)
ec2_snapshot2_id = self._create_snapshot(ec2_volume_id)
snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'snapshot_id': snapshot1_id,
'delete_on_termination': False, },
{'device_name': '/dev/vdc',
'snapshot_id': snapshot2_id,
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance_wait(**kwargs)
instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
vols = db.volume_get_all_by_instance(self.context, instance_id)
self.assertEqual(len(vols), 2)
vol1_id = None
vol2_id = None
for vol in vols:
snapshot_id = vol['snapshot_id']
if snapshot_id == snapshot1_id:
vol1_id = vol['id']
mountpoint = '/dev/vdb'
elif snapshot_id == snapshot2_id:
vol2_id = vol['id']
mountpoint = '/dev/vdc'
else:
self.fail()
self._assert_volume_attached(vol, instance_id, mountpoint)
self.assertTrue(vol1_id)
self.assertTrue(vol2_id)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
greenthread.sleep(0.3)
self._wait_for_terminate(ec2_instance_id)
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=False)
vol = db.volume_get(admin_ctxt, vol1_id)
self._assert_volume_detached(vol)
self.assertFalse(vol['deleted'])
db.volume_destroy(self.context, vol1_id)
greenthread.sleep(0.3)
admin_ctxt = context.get_admin_context(read_deleted=True)
vol = db.volume_get(admin_ctxt, vol2_id)
self.assertTrue(vol['deleted'])
for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
self.cloud.delete_snapshot(self.context, snapshot_id)
greenthread.sleep(0.3)
db.volume_destroy(self.context, vol['id'])
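These tests convert between integer database ids and EC2-style ids throughout. Here is a sketch of what the two ec2utils helpers are assumed to do, consistent with the ids seen above ('ami-00000001' for id 1, and the 'vol-%08x' template); the actual implementation may differ.

def id_to_ec2_id(instance_id, template='i-%08x'):
    """Render an integer id as a prefixed, zero-padded hex string."""
    return template % instance_id

def ec2_id_to_id(ec2_id):
    """Inverse conversion: strip the prefix and parse the hex digits."""
    return int(ec2_id.split('-')[-1], 16)

assert id_to_ec2_id(1) == 'i-00000001'
assert id_to_ec2_id(1, 'vol-%08x') == 'vol-00000001'
assert ec2_id_to_id('ami-00000001') == 1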

View File

@@ -22,21 +22,21 @@ Tests For Compute
import mox
import stubout
from nova.auth import manager
from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import flags
import nova.image.fake
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.db.sqlalchemy import models
from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
@@ -73,7 +73,7 @@ class ComputeTestCase(test.TestCase):
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
def tearDown(self):
self.manager.delete_user(self.user)
@@ -228,6 +228,21 @@ class ComputeTestCase(test.TestCase):
self.assert_(instance_ref['launched_at'] < terminate)
self.assert_(instance_ref['deleted_at'] > terminate)
def test_stop(self):
"""Ensure instance can be stopped"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.stop_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_start(self):
"""Ensure instance can be started"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.stop_instance(self.context, instance_id)
self.compute.start_instance(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_pause(self):
"""Ensure instance can be paused"""
instance_id = self._create_instance()
@@ -266,6 +281,14 @@ class ComputeTestCase(test.TestCase):
"File Contents")
self.compute.terminate_instance(self.context, instance_id)
def test_agent_update(self):
"""Ensure instance can have its agent updated"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
self.compute.agent_update(self.context, instance_id,
'http://127.0.0.1/agent', '00112233445566778899aabbccddeeff')
self.compute.terminate_instance(self.context, instance_id)
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance_id = self._create_instance()

View File

@@ -33,12 +33,12 @@ from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import SimpleDH
from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
@@ -84,7 +84,8 @@ class XenAPIVolumeTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
'os_type': 'linux',
'architecture': 'x86-64'}
def _create_volume(self, size='0'):
"""Create a volume object."""
@@ -190,8 +191,8 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_determine_is_pv_objectstore(self.stubs)
self.stubs.Set(VMOps, 'reset_network', reset_network)
stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
@@ -211,7 +212,8 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
@@ -228,6 +230,23 @@ class XenAPIVMTestCase(test.TestCase):
instance = self._create_instance()
self.conn.get_diagnostics(instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
name = "MySnapshot"
self.assertRaises(exception.Error, self.conn.snapshot, instance, name)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
@@ -331,7 +350,7 @@ class XenAPIVMTestCase(test.TestCase):
def check_vm_params_for_linux(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
@@ -364,7 +383,8 @@ class XenAPIVMTestCase(test.TestCase):
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
instance_id=1, check_injection=False):
architecture="x86-64", instance_id=1,
check_injection=False):
stubs.stubout_loopingcall_start(self.stubs)
values = {'id': instance_id,
'project_id': self.project.id,
@@ -374,11 +394,14 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': ramdisk_id,
'instance_type_id': instance_type_id,
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': os_type}
'os_type': os_type,
'architecture': architecture}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
self.assertTrue(instance.architecture)
def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance'
@@ -396,7 +419,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_fetch_image_glance_disk(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# no new VDI should be found
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
@@ -410,7 +433,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# no new VDI should be found
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
@@ -431,7 +454,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_linux(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux")
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_swapdisk(self):
@@ -460,7 +483,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_windows(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows")
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
@@ -611,7 +634,8 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
return instance
@@ -621,8 +645,8 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = SimpleDH()
self.bob = SimpleDH()
self.alice = vmops.SimpleDH()
self.bob = vmops.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
@@ -686,7 +710,8 @@ class XenAPIMigrateInstance(test.TestCase):
'local_gb': 5,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux'}
'os_type': 'linux',
'architecture': 'x86-64'}
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
@@ -725,6 +750,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type(
@@ -769,6 +795,28 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
"""Test that cmp_version compares a as less than b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
"""Test that cmp_version compares a as greater than b"""
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
"""Test that cmp_version compares a as equal to b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
"""Test that cmp_version compares non-lexically"""
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
"""Test that cmp_version compares by length as last resort"""
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
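One implementation consistent with all five tests above, shown as a sketch (the real vmops.cmp_version may differ in detail): compare the dotted components numerically, and fall back to length only when one version is a prefix of the other.

def cmp_version(a, b):
    """Return <0, 0 or >0, comparing dotted version strings numerically."""
    va = [int(x) for x in a.split('.')]
    vb = [int(x) for x in b.split('.')]
    for x, y in zip(va, vb):
        if x != y:
            return x - y      # numeric compare, so '10' sorts after '4'
    return len(va) - len(vb)  # length is the last resort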
class FakeXenApi(object):
"""Fake XenApi for testing HostState."""

View File

@@ -92,6 +92,12 @@ def stubout_stream_disk(stubs):
stubs.Set(vm_utils, '_stream_disk', f)
def stubout_is_vdi_pv(stubs):
def f(_1):
return False
stubs.Set(vm_utils, '_is_vdi_pv', f)
def stubout_determine_is_pv_objectstore(stubs):
"""Assumes VMs never have PV kernels"""
@@ -150,7 +156,7 @@ class FakeSessionForVMTests(fake.SessionBase):
super(FakeSessionForVMTests, self).__init__(uri)
def host_call_plugin(self, _1, _2, plugin, method, _5):
# copy_kernel_vdi returns nothing
# If the call is for 'copy_kernel_vdi' return None.
if method == 'copy_kernel_vdi':
return
sr_ref = fake.get_all('SR')[0]

View File

@@ -35,6 +35,7 @@ import struct
import sys
import time
import types
import uuid
from xml.sax import saxutils
from eventlet import event
@@ -726,3 +727,17 @@ def parse_server_string(server_str):
except:
LOG.debug(_('Invalid server_string: %s' % server_str))
return ('', '')
def gen_uuid():
return uuid.uuid4()
def is_uuid_like(val):
"""For our purposes, a UUID is a string in canoical form:
aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
"""
if not isinstance(val, basestring):
return False
return (len(val) == 36) and (val.count('-') == 4)
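The canonical-form check relies only on the fixed layout of a UUID string: 36 characters with four hyphens. A quick demonstration using the standard uuid module:

import uuid

u = str(uuid.uuid4())                      # canonical form, i.e.
                                           # 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
assert len(u) == 36 and u.count('-') == 4  # exactly what is_uuid_like tests
assert 'i-00000001'.count('-') != 4        # EC2-style ids fail the check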

View File

@@ -56,9 +56,11 @@ To run a single test module:
"""
import gettext
import heapq
import os
import unittest
import sys
import time
gettext.install('nova', unicode=1)
@@ -183,9 +185,21 @@ class _NullColorizer(object):
self.stream.write(text)
def get_elapsed_time_color(elapsed_time):
if elapsed_time > 1.0:
return 'red'
elif elapsed_time > 0.25:
return 'yellow'
else:
return 'green'
class NovaTestResult(result.TextTestResult):
def __init__(self, *args, **kw):
self.show_elapsed = kw.pop('show_elapsed')
result.TextTestResult.__init__(self, *args, **kw)
self.num_slow_tests = 5
self.slow_tests = [] # this is a fixed-size heap
self._last_case = None
self.colorizer = None
# NOTE(vish): reset stdout for the terminal check
@@ -197,28 +211,49 @@ class NovaTestResult(result.TextTestResult):
break
sys.stdout = stdout
# NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
# error results in it failing to be initialized later. Otherwise,
# _handleElapsedTime will fail, causing the wrong error message to
be reported.
self.start_time = time.time()
def getDescription(self, test):
return str(test)
def _handleElapsedTime(self, test):
self.elapsed_time = time.time() - self.start_time
item = (self.elapsed_time, test)
# Record only the n slowest tests using a fixed-size heap
if len(self.slow_tests) >= self.num_slow_tests:
heapq.heappushpop(self.slow_tests, item)
else:
heapq.heappush(self.slow_tests, item)
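heappushpop keeps the heap at a fixed size: once num_slow_tests items are stored, each new timing pushes in and the smallest (fastest) element pops out, so only the slowest N survive. A standalone sketch of the same pattern, with made-up timings:

import heapq

num_slow = 5
slow = []  # min-heap of (elapsed_time, test_name)
timings = [(0.1, 'a'), (2.0, 'b'), (0.5, 'c'), (3.0, 'd'),
           (0.2, 'e'), (1.5, 'f'), (0.05, 'g')]
for item in timings:
    if len(slow) >= num_slow:
        heapq.heappushpop(slow, item)  # evict the current fastest
    else:
        heapq.heappush(slow, item)
print(sorted(slow, reverse=True))      # slowest first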
def _writeElapsedTime(self, test):
color = get_elapsed_time_color(self.elapsed_time)
self.colorizer.write(" %.2f" % self.elapsed_time, color)
def _writeResult(self, test, long_result, color, short_result, success):
if self.showAll:
self.colorizer.write(long_result, color)
if self.show_elapsed and success:
self._writeElapsedTime(test)
self.stream.writeln()
elif self.dots:
self.stream.write(short_result)
self.stream.flush()
# NOTE(vish): copied from unittest with edit to add color
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
if self.showAll:
self.colorizer.write("OK", 'green')
self.stream.writeln()
elif self.dots:
self.stream.write('.')
self.stream.flush()
self._handleElapsedTime(test)
self._writeResult(test, 'OK', 'green', '.', True)
# NOTE(vish): copied from unittest with edit to add color
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
if self.showAll:
self.colorizer.write("FAIL", 'red')
self.stream.writeln()
elif self.dots:
self.stream.write('F')
self.stream.flush()
self._handleElapsedTime(test)
self._writeResult(test, 'FAIL', 'red', 'F', False)
# NOTE(vish): copied from nose with edit to add color
def addError(self, test, err):
@@ -226,6 +261,7 @@ class NovaTestResult(result.TextTestResult):
errorClasses. If the exception is a registered class, the
error will be added to the list for that class, not errors.
"""
self._handleElapsedTime(test)
stream = getattr(self, 'stream', None)
ec, ev, tb = err
try:
@@ -252,14 +288,11 @@ class NovaTestResult(result.TextTestResult):
self.errors.append((test, exc_info))
test.passed = False
if stream is not None:
if self.showAll:
self.colorizer.write("ERROR", 'red')
self.stream.writeln()
elif self.dots:
stream.write('E')
self._writeResult(test, 'ERROR', 'red', 'E', False)
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.start_time = time.time()
current_case = test.test.__class__.__name__
if self.showAll:
@@ -273,21 +306,47 @@ class NovaTestResult(result.TextTestResult):
class NovaTestRunner(core.TextTestRunner):
def __init__(self, *args, **kwargs):
self.show_elapsed = kwargs.pop('show_elapsed')
core.TextTestRunner.__init__(self, *args, **kwargs)
def _makeResult(self):
return NovaTestResult(self.stream,
self.descriptions,
self.verbosity,
self.config)
self.config,
show_elapsed=self.show_elapsed)
def _writeSlowTests(self, result_):
# Pare out 'fast' tests
slow_tests = [item for item in result_.slow_tests
if get_elapsed_time_color(item[0]) != 'green']
if slow_tests:
slow_total_time = sum(item[0] for item in slow_tests)
self.stream.writeln("Slowest %i tests took %.2f secs:"
% (len(slow_tests), slow_total_time))
for elapsed_time, test in sorted(slow_tests, reverse=True):
time_str = "%.2f" % elapsed_time
self.stream.writeln(" %s %s" % (time_str.ljust(10), test))
def run(self, test):
result_ = core.TextTestRunner.run(self, test)
if self.show_elapsed:
self._writeSlowTests(result_)
return result_
if __name__ == '__main__':
logging.setup()
# If any argument looks like a test name but doesn't have "nova.tests" in
# front of it, automatically add that so we don't have to type as much
show_elapsed = True
argv = []
for x in sys.argv:
if x.startswith('test_'):
argv.append('nova.tests.%s' % x)
elif x.startswith('--hide-elapsed'):
show_elapsed = False
else:
argv.append(x)
@@ -300,5 +359,6 @@ if __name__ == '__main__':
runner = NovaTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c)
config=c,
show_elapsed=show_elapsed)
sys.exit(not core.run(config=c, testRunner=runner, argv=argv))

View File

@@ -10,6 +10,7 @@ function usage {
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run pep8"
echo " -h, --help Print this usage message"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
@@ -24,6 +25,7 @@ function process_option {
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
-f|--force) let force=1;;
-p|--pep8) let just_pep8=1;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
}
@@ -34,6 +36,7 @@ always_venv=0
never_venv=0
force=0
noseargs=
noseopts=
wrapper=""
just_pep8=0
@@ -72,7 +75,7 @@ function run_pep8 {
--exclude=vcsversion.py ${srcfiles}
}
NOSETESTS="python run_tests.py $noseargs"
NOSETESTS="python run_tests.py $noseopts $noseargs"
if [ $never_venv -eq 0 ]
then
@@ -107,7 +110,10 @@ fi
run_tests || exit
# Also run pep8 if no options were provided.
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and
# arguments (noseargs).
if [ -z "$noseargs" ]; then
run_pep8
fi