merging trunk
@ -1,283 +0,0 @@
|
||||
source/api/nova..adminclient.rst
|
||||
source/api/nova..api.direct.rst
|
||||
source/api/nova..api.ec2.admin.rst
|
||||
source/api/nova..api.ec2.apirequest.rst
|
||||
source/api/nova..api.ec2.cloud.rst
|
||||
source/api/nova..api.ec2.metadatarequesthandler.rst
|
||||
source/api/nova..api.openstack.auth.rst
|
||||
source/api/nova..api.openstack.backup_schedules.rst
|
||||
source/api/nova..api.openstack.common.rst
|
||||
source/api/nova..api.openstack.consoles.rst
|
||||
source/api/nova..api.openstack.faults.rst
|
||||
source/api/nova..api.openstack.flavors.rst
|
||||
source/api/nova..api.openstack.images.rst
|
||||
source/api/nova..api.openstack.servers.rst
|
||||
source/api/nova..api.openstack.shared_ip_groups.rst
|
||||
source/api/nova..api.openstack.zones.rst
|
||||
source/api/nova..auth.dbdriver.rst
|
||||
source/api/nova..auth.fakeldap.rst
|
||||
source/api/nova..auth.ldapdriver.rst
|
||||
source/api/nova..auth.manager.rst
|
||||
source/api/nova..auth.signer.rst
|
||||
source/api/nova..cloudpipe.pipelib.rst
|
||||
source/api/nova..compute.api.rst
|
||||
source/api/nova..compute.instance_types.rst
|
||||
source/api/nova..compute.manager.rst
|
||||
source/api/nova..compute.monitor.rst
|
||||
source/api/nova..compute.power_state.rst
|
||||
source/api/nova..console.api.rst
|
||||
source/api/nova..console.fake.rst
|
||||
source/api/nova..console.manager.rst
|
||||
source/api/nova..console.xvp.rst
|
||||
source/api/nova..context.rst
|
||||
source/api/nova..crypto.rst
|
||||
source/api/nova..db.api.rst
|
||||
source/api/nova..db.base.rst
|
||||
source/api/nova..db.migration.rst
|
||||
source/api/nova..db.sqlalchemy.api.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
|
||||
source/api/nova..db.sqlalchemy.migration.rst
|
||||
source/api/nova..db.sqlalchemy.models.rst
|
||||
source/api/nova..db.sqlalchemy.session.rst
|
||||
source/api/nova..exception.rst
|
||||
source/api/nova..fakememcache.rst
|
||||
source/api/nova..fakerabbit.rst
|
||||
source/api/nova..flags.rst
|
||||
source/api/nova..image.glance.rst
|
||||
source/api/nova..image.local.rst
|
||||
source/api/nova..image.s3.rst
|
||||
source/api/nova..image.service.rst
|
||||
source/api/nova..log.rst
|
||||
source/api/nova..manager.rst
|
||||
source/api/nova..network.api.rst
|
||||
source/api/nova..network.linux_net.rst
|
||||
source/api/nova..network.manager.rst
|
||||
source/api/nova..objectstore.bucket.rst
|
||||
source/api/nova..objectstore.handler.rst
|
||||
source/api/nova..objectstore.image.rst
|
||||
source/api/nova..objectstore.stored.rst
|
||||
source/api/nova..quota.rst
|
||||
source/api/nova..rpc.rst
|
||||
source/api/nova..scheduler.chance.rst
|
||||
source/api/nova..scheduler.driver.rst
|
||||
source/api/nova..scheduler.manager.rst
|
||||
source/api/nova..scheduler.simple.rst
|
||||
source/api/nova..scheduler.zone.rst
|
||||
source/api/nova..service.rst
|
||||
source/api/nova..test.rst
|
||||
source/api/nova..tests.api.openstack.fakes.rst
|
||||
source/api/nova..tests.api.openstack.test_adminapi.rst
|
||||
source/api/nova..tests.api.openstack.test_api.rst
|
||||
source/api/nova..tests.api.openstack.test_auth.rst
|
||||
source/api/nova..tests.api.openstack.test_common.rst
|
||||
source/api/nova..tests.api.openstack.test_faults.rst
|
||||
source/api/nova..tests.api.openstack.test_flavors.rst
|
||||
source/api/nova..tests.api.openstack.test_images.rst
|
||||
source/api/nova..tests.api.openstack.test_ratelimiting.rst
|
||||
source/api/nova..tests.api.openstack.test_servers.rst
|
||||
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
|
||||
source/api/nova..tests.api.openstack.test_zones.rst
|
||||
source/api/nova..tests.api.test_wsgi.rst
|
||||
source/api/nova..tests.db.fakes.rst
|
||||
source/api/nova..tests.declare_flags.rst
|
||||
source/api/nova..tests.fake_flags.rst
|
||||
source/api/nova..tests.glance.stubs.rst
|
||||
source/api/nova..tests.hyperv_unittest.rst
|
||||
source/api/nova..tests.objectstore_unittest.rst
|
||||
source/api/nova..tests.real_flags.rst
|
||||
source/api/nova..tests.runtime_flags.rst
|
||||
source/api/nova..tests.test_access.rst
|
||||
source/api/nova..tests.test_api.rst
|
||||
source/api/nova..tests.test_auth.rst
|
||||
source/api/nova..tests.test_cloud.rst
|
||||
source/api/nova..tests.test_compute.rst
|
||||
source/api/nova..tests.test_console.rst
|
||||
source/api/nova..tests.test_direct.rst
|
||||
source/api/nova..tests.test_flags.rst
|
||||
source/api/nova..tests.test_instance_types.rst
|
||||
source/api/nova..tests.test_localization.rst
|
||||
source/api/nova..tests.test_log.rst
|
||||
source/api/nova..tests.test_middleware.rst
|
||||
source/api/nova..tests.test_misc.rst
|
||||
source/api/nova..tests.test_network.rst
|
||||
source/api/nova..tests.test_quota.rst
|
||||
source/api/nova..tests.test_rpc.rst
|
||||
source/api/nova..tests.test_scheduler.rst
|
||||
source/api/nova..tests.test_service.rst
|
||||
source/api/nova..tests.test_test.rst
|
||||
source/api/nova..tests.test_twistd.rst
|
||||
source/api/nova..tests.test_utils.rst
|
||||
source/api/nova..tests.test_virt.rst
|
||||
source/api/nova..tests.test_volume.rst
|
||||
source/api/nova..tests.test_xenapi.rst
|
||||
source/api/nova..tests.xenapi.stubs.rst
|
||||
source/api/nova..twistd.rst
|
||||
source/api/nova..utils.rst
|
||||
source/api/nova..version.rst
|
||||
source/api/nova..virt.connection.rst
|
||||
source/api/nova..virt.disk.rst
|
||||
source/api/nova..virt.fake.rst
|
||||
source/api/nova..virt.hyperv.rst
|
||||
source/api/nova..virt.images.rst
|
||||
source/api/nova..virt.libvirt_conn.rst
|
||||
source/api/nova..virt.xenapi.fake.rst
|
||||
source/api/nova..virt.xenapi.network_utils.rst
|
||||
source/api/nova..virt.xenapi.vm_utils.rst
|
||||
source/api/nova..virt.xenapi.vmops.rst
|
||||
source/api/nova..virt.xenapi.volume_utils.rst
|
||||
source/api/nova..virt.xenapi.volumeops.rst
|
||||
source/api/nova..virt.xenapi_conn.rst
|
||||
source/api/nova..volume.api.rst
|
||||
source/api/nova..volume.driver.rst
|
||||
source/api/nova..volume.manager.rst
|
||||
source/api/nova..volume.san.rst
|
||||
source/api/nova..wsgi.rst
|
||||
source/api/autoindex.rst
|
||||
source/api/nova..adminclient.rst
|
||||
source/api/nova..api.direct.rst
|
||||
source/api/nova..api.ec2.admin.rst
|
||||
source/api/nova..api.ec2.apirequest.rst
|
||||
source/api/nova..api.ec2.cloud.rst
|
||||
source/api/nova..api.ec2.metadatarequesthandler.rst
|
||||
source/api/nova..api.openstack.auth.rst
|
||||
source/api/nova..api.openstack.backup_schedules.rst
|
||||
source/api/nova..api.openstack.common.rst
|
||||
source/api/nova..api.openstack.consoles.rst
|
||||
source/api/nova..api.openstack.faults.rst
|
||||
source/api/nova..api.openstack.flavors.rst
|
||||
source/api/nova..api.openstack.images.rst
|
||||
source/api/nova..api.openstack.servers.rst
|
||||
source/api/nova..api.openstack.shared_ip_groups.rst
|
||||
source/api/nova..api.openstack.zones.rst
|
||||
source/api/nova..auth.dbdriver.rst
|
||||
source/api/nova..auth.fakeldap.rst
|
||||
source/api/nova..auth.ldapdriver.rst
|
||||
source/api/nova..auth.manager.rst
|
||||
source/api/nova..auth.signer.rst
|
||||
source/api/nova..cloudpipe.pipelib.rst
|
||||
source/api/nova..compute.api.rst
|
||||
source/api/nova..compute.instance_types.rst
|
||||
source/api/nova..compute.manager.rst
|
||||
source/api/nova..compute.monitor.rst
|
||||
source/api/nova..compute.power_state.rst
|
||||
source/api/nova..console.api.rst
|
||||
source/api/nova..console.fake.rst
|
||||
source/api/nova..console.manager.rst
|
||||
source/api/nova..console.xvp.rst
|
||||
source/api/nova..context.rst
|
||||
source/api/nova..crypto.rst
|
||||
source/api/nova..db.api.rst
|
||||
source/api/nova..db.base.rst
|
||||
source/api/nova..db.migration.rst
|
||||
source/api/nova..db.sqlalchemy.api.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
|
||||
source/api/nova..db.sqlalchemy.migration.rst
|
||||
source/api/nova..db.sqlalchemy.models.rst
|
||||
source/api/nova..db.sqlalchemy.session.rst
|
||||
source/api/nova..exception.rst
|
||||
source/api/nova..fakememcache.rst
|
||||
source/api/nova..fakerabbit.rst
|
||||
source/api/nova..flags.rst
|
||||
source/api/nova..image.glance.rst
|
||||
source/api/nova..image.local.rst
|
||||
source/api/nova..image.s3.rst
|
||||
source/api/nova..image.service.rst
|
||||
source/api/nova..log.rst
|
||||
source/api/nova..manager.rst
|
||||
source/api/nova..network.api.rst
|
||||
source/api/nova..network.linux_net.rst
|
||||
source/api/nova..network.manager.rst
|
||||
source/api/nova..objectstore.bucket.rst
|
||||
source/api/nova..objectstore.handler.rst
|
||||
source/api/nova..objectstore.image.rst
|
||||
source/api/nova..objectstore.stored.rst
|
||||
source/api/nova..quota.rst
|
||||
source/api/nova..rpc.rst
|
||||
source/api/nova..scheduler.chance.rst
|
||||
source/api/nova..scheduler.driver.rst
|
||||
source/api/nova..scheduler.manager.rst
|
||||
source/api/nova..scheduler.simple.rst
|
||||
source/api/nova..scheduler.zone.rst
|
||||
source/api/nova..service.rst
|
||||
source/api/nova..test.rst
|
||||
source/api/nova..tests.api.openstack.fakes.rst
|
||||
source/api/nova..tests.api.openstack.test_adminapi.rst
|
||||
source/api/nova..tests.api.openstack.test_api.rst
|
||||
source/api/nova..tests.api.openstack.test_auth.rst
|
||||
source/api/nova..tests.api.openstack.test_common.rst
|
||||
source/api/nova..tests.api.openstack.test_faults.rst
|
||||
source/api/nova..tests.api.openstack.test_flavors.rst
|
||||
source/api/nova..tests.api.openstack.test_images.rst
|
||||
source/api/nova..tests.api.openstack.test_ratelimiting.rst
|
||||
source/api/nova..tests.api.openstack.test_servers.rst
|
||||
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
|
||||
source/api/nova..tests.api.openstack.test_zones.rst
|
||||
source/api/nova..tests.api.test_wsgi.rst
|
||||
source/api/nova..tests.db.fakes.rst
|
||||
source/api/nova..tests.declare_flags.rst
|
||||
source/api/nova..tests.fake_flags.rst
|
||||
source/api/nova..tests.glance.stubs.rst
|
||||
source/api/nova..tests.hyperv_unittest.rst
|
||||
source/api/nova..tests.objectstore_unittest.rst
|
||||
source/api/nova..tests.real_flags.rst
|
||||
source/api/nova..tests.runtime_flags.rst
|
||||
source/api/nova..tests.test_access.rst
|
||||
source/api/nova..tests.test_api.rst
|
||||
source/api/nova..tests.test_auth.rst
|
||||
source/api/nova..tests.test_cloud.rst
|
||||
source/api/nova..tests.test_compute.rst
|
||||
source/api/nova..tests.test_console.rst
|
||||
source/api/nova..tests.test_direct.rst
|
||||
source/api/nova..tests.test_flags.rst
|
||||
source/api/nova..tests.test_instance_types.rst
|
||||
source/api/nova..tests.test_localization.rst
|
||||
source/api/nova..tests.test_log.rst
|
||||
source/api/nova..tests.test_middleware.rst
|
||||
source/api/nova..tests.test_misc.rst
|
||||
source/api/nova..tests.test_network.rst
|
||||
source/api/nova..tests.test_quota.rst
|
||||
source/api/nova..tests.test_rpc.rst
|
||||
source/api/nova..tests.test_scheduler.rst
|
||||
source/api/nova..tests.test_service.rst
|
||||
source/api/nova..tests.test_test.rst
|
||||
source/api/nova..tests.test_twistd.rst
|
||||
source/api/nova..tests.test_utils.rst
|
||||
source/api/nova..tests.test_virt.rst
|
||||
source/api/nova..tests.test_volume.rst
|
||||
source/api/nova..tests.test_xenapi.rst
|
||||
source/api/nova..tests.xenapi.stubs.rst
|
||||
source/api/nova..twistd.rst
|
||||
source/api/nova..utils.rst
|
||||
source/api/nova..version.rst
|
||||
source/api/nova..virt.connection.rst
|
||||
source/api/nova..virt.disk.rst
|
||||
source/api/nova..virt.fake.rst
|
||||
source/api/nova..virt.hyperv.rst
|
||||
source/api/nova..virt.images.rst
|
||||
source/api/nova..virt.libvirt_conn.rst
|
||||
source/api/nova..virt.xenapi.fake.rst
|
||||
source/api/nova..virt.xenapi.network_utils.rst
|
||||
source/api/nova..virt.xenapi.vm_utils.rst
|
||||
source/api/nova..virt.xenapi.vmops.rst
|
||||
source/api/nova..virt.xenapi.volume_utils.rst
|
||||
source/api/nova..virt.xenapi.volumeops.rst
|
||||
source/api/nova..virt.xenapi_conn.rst
|
||||
source/api/nova..volume.api.rst
|
||||
source/api/nova..volume.driver.rst
|
||||
source/api/nova..volume.manager.rst
|
||||
source/api/nova..volume.san.rst
|
||||
source/api/nova..wsgi.rst
|
@ -14,10 +14,16 @@
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
Source for illustrations in doc/source/image_src/zone_distsched_illustrations.odp
|
||||
(OpenOffice Impress format) Illustrations are "exported" to png and then scaled
|
||||
to 400x300 or 640x480 as needed and placed in the doc/source/images directory.
|
||||
|
||||
Distributed Scheduler
|
||||
=====
|
||||
=====================
|
||||
|
||||
The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
|
||||
The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
|
||||
|
||||
.. image:: /images/dating_service.png
|
||||
|
||||
But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understand how to pass instance requests from Zone to Zone.
|
||||
|
||||
@ -25,75 +31,87 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab
|
||||
|
||||
So, how does this all work?
|
||||
|
||||
This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the Zones documentation before reading this.
|
||||
This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the :doc:`devguide/zones` documentation before reading this.
|
||||
|
||||
.. image:: /images/zone_aware_scheduler.png
|
||||
|
||||
Costs & Weights
|
||||
----------
|
||||
When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to putting a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
|
||||
---------------
|
||||
When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
|
||||
|
||||
Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost.
|
||||
|
||||
An example of some other costs might include selecting:
|
||||
* a GPU-based host over a standard CPU
|
||||
* a host with fast ethernet over a 10mbps line
|
||||
* a host that can run Windows instances
|
||||
* a host in the EU vs North America
|
||||
* etc
|
||||
* a GPU-based host over a standard CPU
|
||||
* a host with fast ethernet over a 10mbps line
|
||||
* a host that can run Windows instances
|
||||
* a host in the EU vs North America
|
||||
* etc
|
||||
|
||||
This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly.
|
||||
|
||||
.. image:: /images/costs_weights.png
|
||||
|
||||
nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler
|
||||
-----------
|
||||
------------------------------------------------------
|
||||
As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions.
|
||||
|
||||
Here is how it works:
|
||||
|
||||
1. The compute nodes are filtered and the nodes remaining are weighed.
|
||||
1a. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
|
||||
1b. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
|
||||
2. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
|
||||
3. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
|
||||
4. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
|
||||
1. The compute nodes are filtered and the nodes remaining are weighed.
|
||||
2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
|
||||
3. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
|
||||
4. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
|
||||
5. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
|
||||
6. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
|
||||
|
||||
.. image:: /images/zone_aware_overview.png
|
||||
|
||||
`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used.
|
||||
|
||||
Filtering and Weighing
|
||||
------------
|
||||
----------------------
|
||||
The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible.
|
||||
|
||||
.. image:: /images/filtering.png
|
||||
|
||||
Requesting a new instance
|
||||
------------
|
||||
-------------------------
|
||||
Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.
|
||||
|
||||
`nova.compute.api.create()` performed the following actions:
|
||||
1. it validated all the fields passed into it.
|
||||
2. it created an entry in the `Instance` table for each instance requested
|
||||
3. it put one `run_instance` message in the scheduler queue for each instance requested
|
||||
4. the schedulers picked off the messages and decided which compute node should handle the request.
|
||||
5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
|
||||
6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`s are valid.
|
||||
1. it validated all the fields passed into it.
|
||||
2. it created an entry in the `Instance` table for each instance requested
|
||||
3. it put one `run_instance` message in the scheduler queue for each instance requested
|
||||
4. the schedulers picked off the messages and decided which compute node should handle the request.
|
||||
5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
|
||||
6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_ids` are valid.
|
||||
|
||||
.. image:: /images/nova.compute.api.create.png
|
||||
|
||||
Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.
|
||||
|
||||
The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possibility of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.
|
||||
|
||||
For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
|
||||
1. it validates all the fields passed into it.
|
||||
2. it creates a single `reservation_id` for all of instances created. This is a UUID.
|
||||
3. it creates a single `run_instance` request in the scheduler queue
|
||||
4. a scheduler picks the message off the queue and works on it.
|
||||
5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
|
||||
6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
|
||||
7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
|
||||
8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
|
||||
9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
|
||||
10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
|
||||
1. it validates all the fields passed into it.
|
||||
2. it creates a single `reservation_id` for all of the instances created. This is a UUID.
|
||||
3. it creates a single `run_instance` request in the scheduler queue
|
||||
4. a scheduler picks the message off the queue and works on it.
|
||||
5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
|
||||
6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
|
||||
7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
|
||||
8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
|
||||
9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
|
||||
10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
|
||||
|
||||
.. image:: /images/nova.compute.api.create_all_at_once.png
|
||||
|
||||
The Catch
|
||||
-------------
|
||||
---------
|
||||
This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
|
||||
|
||||
When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
|
||||
When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
|
||||
|
||||
Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back to the parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`.
|
||||
|
||||
@ -108,7 +126,7 @@ NOTE: The features described in this section are related to the up-coming 'merge
|
||||
|
||||
The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.
|
||||
|
||||
NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
|
||||
NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
|
||||
|
||||
We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches.
|
||||
|
||||
@ -117,7 +135,7 @@ Finally, we need to give the user a way to get information on each of the instan
|
||||
`python-novaclient` will be extended to support both of these changes.
|
||||
|
||||
Host Filter
|
||||
--------------
|
||||
-----------
|
||||
|
||||
As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To facilitate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
|
||||
|
||||
@ -130,21 +148,22 @@ The filter used is determined by the `--default_host_filter` flag, which points
|
||||
To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be.
|
||||
|
||||
Cost Scheduler Weighing
|
||||
--------------
|
||||
-----------------------
|
||||
Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled.
|
||||
|
||||
Simple Zone Aware Scheduling
|
||||
--------------
|
||||
The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
|
||||
----------------------------
|
||||
The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
|
||||
|
||||
The `--scheduler_driver` flag is how you specify the scheduler class name.
|
||||
|
||||
Flags
|
||||
--------------
|
||||
-----
|
||||
|
||||
All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file:
|
||||
|
||||
::
|
||||
|
||||
--allow_admin_api=true
|
||||
--enable_zone_routing=true
|
||||
--zone_name=zone1
|
||||
@ -162,6 +181,7 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf
|
||||
Some optional flags which are handy for debugging are:
|
||||
|
||||
::
|
||||
|
||||
--connection_type=fake
|
||||
--verbose
|
||||
|
||||
|
@ -21,7 +21,7 @@ A Nova deployment is called a Zone. A Zone allows you to partition your deployme
|
||||
|
||||
The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion.
|
||||
|
||||
Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones.
|
||||
Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones and, in those cases, the grandchild's internal structure would not be known to the grand-parent.
|
||||
|
||||
Zones share nothing. They communicate via the public OpenStack API only. No database, queue, user or project definition is shared between Zones.
|
||||
|
||||
@ -99,7 +99,7 @@ You can get the `child zone api url`, `nova api key` and `username` from the `no
|
||||
export NOVA_URL="http://192.168.2.120:8774/v1.0/"
|
||||
|
||||
|
||||
This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.
|
||||
This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done with this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.
|
||||
|
||||
Getting a list of child Zones
|
||||
-----------------------------
|
||||
|
BIN
doc/source/image_src/zones_distsched_illustrations.odp
Executable file
BIN
doc/source/images/costs_weights.png
Normal file
After Width: | Height: | Size: 35 KiB |
BIN
doc/source/images/dating_service.png
Normal file
After Width: | Height: | Size: 31 KiB |
BIN
doc/source/images/filtering.png
Normal file
After Width: | Height: | Size: 18 KiB |
BIN
doc/source/images/nova.compute.api.create.png
Executable file
After Width: | Height: | Size: 49 KiB |
BIN
doc/source/images/nova.compute.api.create_all_at_once.png
Executable file
After Width: | Height: | Size: 61 KiB |
BIN
doc/source/images/zone_aware_overview.png
Executable file
After Width: | Height: | Size: 55 KiB |
BIN
doc/source/images/zone_aware_scheduler.png
Normal file
After Width: | Height: | Size: 20 KiB |
@ -81,7 +81,9 @@ class APIRouter(base_wsgi.Router):
|
||||
self._setup_routes(mapper)
|
||||
super(APIRouter, self).__init__(mapper)
|
||||
|
||||
def _setup_routes(self, mapper):
|
||||
def _setup_routes(self, mapper, version):
|
||||
"""Routes common to all versions."""
|
||||
|
||||
server_members = self.server_members
|
||||
server_members['action'] = 'POST'
|
||||
if FLAGS.allow_admin_api:
|
||||
@ -98,11 +100,6 @@ class APIRouter(base_wsgi.Router):
|
||||
server_members['reset_network'] = 'POST'
|
||||
server_members['inject_network_info'] = 'POST'
|
||||
|
||||
mapper.resource("zone", "zones",
|
||||
controller=zones.create_resource(),
|
||||
collection={'detail': 'GET', 'info': 'GET',
|
||||
'select': 'POST'})
|
||||
|
||||
mapper.resource("user", "users",
|
||||
controller=users.create_resource(),
|
||||
collection={'detail': 'GET'})
|
||||
@ -111,10 +108,34 @@ class APIRouter(base_wsgi.Router):
|
||||
controller=accounts.create_resource(),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("zone", "zones",
|
||||
controller=zones.create_resource(version),
|
||||
collection={'detail': 'GET',
|
||||
'info': 'GET',
|
||||
'select': 'POST',
|
||||
'boot': 'POST'
|
||||
})
|
||||
|
||||
mapper.resource("console", "consoles",
|
||||
controller=consoles.create_resource(),
|
||||
parent_resource=dict(member_name='server',
|
||||
collection_name='servers'))
|
||||
controller=consoles.create_resource(),
|
||||
parent_resource=dict(member_name='server',
|
||||
collection_name='servers'))
|
||||
|
||||
mapper.resource("server", "servers",
|
||||
controller=servers.create_resource(version),
|
||||
collection={'detail': 'GET'},
|
||||
member=self.server_members)
|
||||
|
||||
mapper.resource("image", "images",
|
||||
controller=images.create_resource(version),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("limit", "limits",
|
||||
controller=limits.create_resource(version))
|
||||
|
||||
mapper.resource("flavor", "flavors",
|
||||
controller=flavors.create_resource(version),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
super(APIRouter, self).__init__(mapper)
|
||||
|
||||
@ -123,20 +144,11 @@ class APIRouterV10(APIRouter):
|
||||
"""Define routes specific to OpenStack API V1.0."""
|
||||
|
||||
def _setup_routes(self, mapper):
|
||||
super(APIRouterV10, self)._setup_routes(mapper)
|
||||
mapper.resource("server", "servers",
|
||||
controller=servers.create_resource('1.0'),
|
||||
collection={'detail': 'GET'},
|
||||
member=self.server_members)
|
||||
|
||||
super(APIRouterV10, self)._setup_routes(mapper, '1.0')
|
||||
mapper.resource("image", "images",
|
||||
controller=images.create_resource('1.0'),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("flavor", "flavors",
|
||||
controller=flavors.create_resource('1.0'),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("shared_ip_group", "shared_ip_groups",
|
||||
collection={'detail': 'GET'},
|
||||
controller=shared_ip_groups.create_resource())
|
||||
@ -146,9 +158,6 @@ class APIRouterV10(APIRouter):
|
||||
parent_resource=dict(member_name='server',
|
||||
collection_name='servers'))
|
||||
|
||||
mapper.resource("limit", "limits",
|
||||
controller=limits.create_resource('1.0'))
|
||||
|
||||
mapper.resource("ip", "ips", controller=ips.create_resource(),
|
||||
collection=dict(public='GET', private='GET'),
|
||||
parent_resource=dict(member_name='server',
|
||||
@ -159,16 +168,7 @@ class APIRouterV11(APIRouter):
|
||||
"""Define routes specific to OpenStack API V1.1."""
|
||||
|
||||
def _setup_routes(self, mapper):
|
||||
super(APIRouterV11, self)._setup_routes(mapper)
|
||||
mapper.resource("server", "servers",
|
||||
controller=servers.create_resource('1.1'),
|
||||
collection={'detail': 'GET'},
|
||||
member=self.server_members)
|
||||
|
||||
mapper.resource("image", "images",
|
||||
controller=images.create_resource('1.1'),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
super(APIRouterV11, self)._setup_routes(mapper, '1.1')
|
||||
mapper.resource("image_meta", "meta",
|
||||
controller=image_metadata.create_resource(),
|
||||
parent_resource=dict(member_name='image',
|
||||
@ -178,10 +178,3 @@ class APIRouterV11(APIRouter):
|
||||
controller=server_metadata.create_resource(),
|
||||
parent_resource=dict(member_name='server',
|
||||
collection_name='servers'))
|
||||
|
||||
mapper.resource("flavor", "flavors",
|
||||
controller=flavors.create_resource('1.1'),
|
||||
collection={'detail': 'GET'})
|
||||
|
||||
mapper.resource("limit", "limits",
|
||||
controller=limits.create_resource('1.1'))
|
||||
|
@ -26,8 +26,6 @@ from nova import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger('nova.api.openstack.common')
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
|
346
nova/api/openstack/create_instance_helper.py
Normal file
@ -0,0 +1,346 @@
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import base64
|
||||
import re
|
||||
import webob
|
||||
|
||||
from webob import exc
|
||||
from xml.dom import minidom
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
import nova.image
|
||||
from nova import quota
|
||||
from nova import utils
|
||||
|
||||
from nova.compute import instance_types
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import wsgi
|
||||
from nova.auth import manager as auth_manager
|
||||
|
||||
|
||||
LOG = logging.getLogger('nova.api.openstack.create_instance_helper')
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
|
||||
class CreateFault(exception.NovaException):
|
||||
message = _("Invalid parameters given to create_instance.")
|
||||
|
||||
def __init__(self, fault):
|
||||
self.fault = fault
|
||||
super(CreateFault, self).__init__()
|
||||
|
||||
|
||||
class CreateInstanceHelper(object):
|
||||
"""This is the base class for OS API Controllers that
|
||||
are capable of creating instances (currently Servers and Zones).
|
||||
|
||||
Once we stabilize the Zones portion of the API we may be able
|
||||
to move this code back into servers.py
|
||||
"""
|
||||
|
||||
def __init__(self, controller):
|
||||
"""We need the image service to create an instance."""
|
||||
self.controller = controller
|
||||
self._image_service = utils.import_object(FLAGS.image_service)
|
||||
super(CreateInstanceHelper, self).__init__()
|
||||
|
||||
def create_instance(self, req, body, create_method):
|
||||
"""Creates a new server for the given user. The approach
|
||||
used depends on the create_method. For example, the standard
|
||||
POST /server call uses compute.api.create(), while
|
||||
POST /zones/server uses compute.api.create_all_at_once().
|
||||
|
||||
The problem is, both approaches return different values (i.e.
|
||||
[instance dicts] vs. reservation_id). So the handling of the
|
||||
return type from this method is left to the caller.
|
||||
"""
|
||||
if not body:
|
||||
raise faults.Fault(exc.HTTPUnprocessableEntity())
|
||||
|
||||
context = req.environ['nova.context']
|
||||
|
||||
password = self.controller._get_server_admin_password(body['server'])
|
||||
|
||||
key_name = None
|
||||
key_data = None
|
||||
key_pairs = auth_manager.AuthManager.get_key_pairs(context)
|
||||
if key_pairs:
|
||||
key_pair = key_pairs[0]
|
||||
key_name = key_pair['name']
|
||||
key_data = key_pair['public_key']
|
||||
|
||||
image_href = self.controller._image_ref_from_req_data(body)
|
||||
try:
|
||||
image_service, image_id = nova.image.get_image_service(image_href)
|
||||
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
|
||||
req, image_id)
|
||||
images = set([str(x['id']) for x in image_service.index(context)])
|
||||
assert str(image_id) in images
|
||||
except Exception, e:
|
||||
msg = _("Cannot find requested image %(image_href)s: %(e)s" %
|
||||
locals())
|
||||
raise faults.Fault(exc.HTTPBadRequest(msg))
|
||||
|
||||
personality = body['server'].get('personality')
|
||||
|
||||
injected_files = []
|
||||
if personality:
|
||||
injected_files = self._get_injected_files(personality)
|
||||
|
||||
flavor_id = self.controller._flavor_id_from_req_data(body)
|
||||
|
||||
if not 'name' in body['server']:
|
||||
msg = _("Server name is not defined")
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
|
||||
zone_blob = body['server'].get('blob')
|
||||
name = body['server']['name']
|
||||
self._validate_server_name(name)
|
||||
name = name.strip()
|
||||
|
||||
reservation_id = body['server'].get('reservation_id')
|
||||
|
||||
try:
|
||||
inst_type = \
|
||||
instance_types.get_instance_type_by_flavor_id(flavor_id)
|
||||
extra_values = {
|
||||
'instance_type': inst_type,
|
||||
'image_ref': image_href,
|
||||
'password': password
|
||||
}
|
||||
|
||||
return (extra_values,
|
||||
create_method(context,
|
||||
inst_type,
|
||||
image_id,
|
||||
kernel_id=kernel_id,
|
||||
ramdisk_id=ramdisk_id,
|
||||
display_name=name,
|
||||
display_description=name,
|
||||
key_name=key_name,
|
||||
key_data=key_data,
|
||||
metadata=body['server'].get('metadata', {}),
|
||||
injected_files=injected_files,
|
||||
admin_password=password,
|
||||
zone_blob=zone_blob,
|
||||
reservation_id=reservation_id
|
||||
)
|
||||
)
|
||||
except quota.QuotaError as error:
|
||||
self._handle_quota_error(error)
|
||||
except exception.ImageNotFound as error:
|
||||
msg = _("Can not find requested image")
|
||||
raise faults.Fault(exc.HTTPBadRequest(msg))
|
||||
|
||||
# Let the caller deal with unhandled exceptions.
|
||||
|
||||
def _handle_quota_error(self, error):
|
||||
"""
|
||||
Reraise quota errors as api-specific http exceptions
|
||||
"""
|
||||
if error.code == "OnsetFileLimitExceeded":
|
||||
expl = _("Personality file limit exceeded")
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
if error.code == "OnsetFilePathLimitExceeded":
|
||||
expl = _("Personality file path too long")
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
if error.code == "OnsetFileContentLimitExceeded":
|
||||
expl = _("Personality file content too long")
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
# if the original error is okay, just reraise it
|
||||
raise error
|
||||
|
||||
def _deserialize_create(self, request):
|
||||
"""
|
||||
Deserialize a create request
|
||||
|
||||
Overrides normal behavior in the case of xml content
|
||||
"""
|
||||
if request.content_type == "application/xml":
|
||||
deserializer = ServerCreateRequestXMLDeserializer()
|
||||
return deserializer.deserialize(request.body)
|
||||
else:
|
||||
return self._deserialize(request.body, request.get_content_type())
|
||||
|
||||
def _validate_server_name(self, value):
|
||||
if not isinstance(value, basestring):
|
||||
msg = _("Server name is not a string or unicode")
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
|
||||
if value.strip() == '':
|
||||
msg = _("Server name is an empty string")
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
|
||||
def _get_kernel_ramdisk_from_image(self, req, image_id):
|
||||
"""Fetch an image from the ImageService, then if present, return the
|
||||
associated kernel and ramdisk image IDs.
|
||||
"""
|
||||
context = req.environ['nova.context']
|
||||
image_meta = self._image_service.show(context, image_id)
|
||||
# NOTE(sirp): extracted to a separate method to aid unit-testing, the
|
||||
# new method doesn't need a request obj or an ImageService stub
|
||||
kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image(
|
||||
image_meta)
|
||||
return kernel_id, ramdisk_id
|
||||
|
||||
@staticmethod
|
||||
def _do_get_kernel_ramdisk_from_image(image_meta):
|
||||
"""Given an ImageService image_meta, return kernel and ramdisk image
|
||||
ids if present.
|
||||
|
||||
This is only valid for `ami` style images.
|
||||
"""
|
||||
image_id = image_meta['id']
|
||||
if image_meta['status'] != 'active':
|
||||
raise exception.ImageUnacceptable(image_id=image_id,
|
||||
reason=_("status is not active"))
|
||||
|
||||
if image_meta.get('container_format') != 'ami':
|
||||
return None, None
|
||||
|
||||
try:
|
||||
kernel_id = image_meta['properties']['kernel_id']
|
||||
except KeyError:
|
||||
raise exception.KernelNotFoundForImage(image_id=image_id)
|
||||
|
||||
try:
|
||||
ramdisk_id = image_meta['properties']['ramdisk_id']
|
||||
except KeyError:
|
||||
raise exception.RamdiskNotFoundForImage(image_id=image_id)
|
||||
|
||||
return kernel_id, ramdisk_id
|
||||
|
||||
def _get_injected_files(self, personality):
|
||||
"""
|
||||
Create a list of injected files from the personality attribute
|
||||
|
||||
At this time, injected_files must be formatted as a list of
|
||||
(file_path, file_content) pairs for compatibility with the
|
||||
underlying compute service.
|
||||
"""
|
||||
injected_files = []
|
||||
|
||||
for item in personality:
|
||||
try:
|
||||
path = item['path']
|
||||
contents = item['contents']
|
||||
except KeyError as key:
|
||||
expl = _('Bad personality format: missing %s') % key
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
except TypeError:
|
||||
expl = _('Bad personality format')
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
try:
|
||||
contents = base64.b64decode(contents)
|
||||
except TypeError:
|
||||
expl = _('Personality content for %s cannot be decoded') % path
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
injected_files.append((path, contents))
|
||||
return injected_files
|
||||
|
||||
def _get_server_admin_password_old_style(self, server):
|
||||
""" Determine the admin password for a server on creation """
|
||||
return utils.generate_password(16)
|
||||
|
||||
def _get_server_admin_password_new_style(self, server):
|
||||
""" Determine the admin password for a server on creation """
|
||||
password = server.get('adminPass')
|
||||
|
||||
if password is None:
|
||||
return utils.generate_password(16)
|
||||
if not isinstance(password, basestring) or password == '':
|
||||
msg = _("Invalid adminPass")
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
return password
|
||||
|
||||
|
||||
class ServerXMLDeserializer(wsgi.XMLDeserializer):
|
||||
"""
|
||||
Deserializer to handle xml-formatted server create requests.
|
||||
|
||||
Handles standard server attributes as well as optional metadata
|
||||
and personality attributes
|
||||
"""
|
||||
|
||||
def create(self, string):
|
||||
"""Deserialize an xml-formatted server create request"""
|
||||
dom = minidom.parseString(string)
|
||||
server = self._extract_server(dom)
|
||||
return {'server': server}
|
||||
|
||||
def _extract_server(self, node):
|
||||
"""Marshal the server attribute of a parsed request"""
|
||||
server = {}
|
||||
server_node = self._find_first_child_named(node, 'server')
|
||||
for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]:
|
||||
if server_node.getAttribute(attr):
|
||||
server[attr] = server_node.getAttribute(attr)
|
||||
metadata = self._extract_metadata(server_node)
|
||||
if metadata is not None:
|
||||
server["metadata"] = metadata
|
||||
personality = self._extract_personality(server_node)
|
||||
if personality is not None:
|
||||
server["personality"] = personality
|
||||
return server
|
||||
|
||||
def _extract_metadata(self, server_node):
|
||||
"""Marshal the metadata attribute of a parsed request"""
|
||||
metadata_node = self._find_first_child_named(server_node, "metadata")
|
||||
if metadata_node is None:
|
||||
return None
|
||||
metadata = {}
|
||||
for meta_node in self._find_children_named(metadata_node, "meta"):
|
||||
key = meta_node.getAttribute("key")
|
||||
metadata[key] = self._extract_text(meta_node)
|
||||
return metadata
|
||||
|
||||
def _extract_personality(self, server_node):
|
||||
"""Marshal the personality attribute of a parsed request"""
|
||||
personality_node = \
|
||||
self._find_first_child_named(server_node, "personality")
|
||||
if personality_node is None:
|
||||
return None
|
||||
personality = []
|
||||
for file_node in self._find_children_named(personality_node, "file"):
|
||||
item = {}
|
||||
if file_node.hasAttribute("path"):
|
||||
item["path"] = file_node.getAttribute("path")
|
||||
item["contents"] = self._extract_text(file_node)
|
||||
personality.append(item)
|
||||
return personality
|
||||
|
||||
def _find_first_child_named(self, parent, name):
|
||||
"""Search a nodes children for the first child with a given name"""
|
||||
for node in parent.childNodes:
|
||||
if node.nodeName == name:
|
||||
return node
|
||||
return None
|
||||
|
||||
def _find_children_named(self, parent, name):
|
||||
"""Return all of a nodes children who have the given name"""
|
||||
for node in parent.childNodes:
|
||||
if node.nodeName == name:
|
||||
yield node
|
||||
|
||||
def _extract_text(self, node):
|
||||
"""Get the text field contained by the given node"""
|
||||
if len(node.childNodes) == 1:
|
||||
child = node.childNodes[0]
|
||||
if child.nodeType == child.TEXT_NODE:
|
||||
return child.nodeValue
|
||||
return ""
|
@ -99,7 +99,7 @@ def create_resource(version='1.0'):
|
||||
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
|
||||
metadata=metadata)
|
||||
metadata=metadata),
|
||||
}
|
||||
|
||||
return wsgi.Resource(controller, serializers=serializers)
|
||||
|
@ -37,12 +37,18 @@ class Controller(object):
|
||||
meta_dict[key] = value
|
||||
return dict(metadata=meta_dict)
|
||||
|
||||
def _check_body(self, body):
|
||||
if body == None or body == "":
|
||||
expl = _('No Request Body')
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
|
||||
def index(self, req, server_id):
|
||||
""" Returns the list of metadata for a given instance """
|
||||
context = req.environ['nova.context']
|
||||
return self._get_metadata(context, server_id)
|
||||
|
||||
def create(self, req, server_id, body):
|
||||
self._check_body(body)
|
||||
context = req.environ['nova.context']
|
||||
metadata = body.get('metadata')
|
||||
try:
|
||||
@ -51,9 +57,10 @@ class Controller(object):
|
||||
metadata)
|
||||
except quota.QuotaError as error:
|
||||
self._handle_quota_error(error)
|
||||
return req.body
|
||||
return body
|
||||
|
||||
def update(self, req, server_id, id, body):
|
||||
self._check_body(body)
|
||||
context = req.environ['nova.context']
|
||||
if not id in body:
|
||||
expl = _('Request body and URI mismatch')
|
||||
@ -68,7 +75,7 @@ class Controller(object):
|
||||
except quota.QuotaError as error:
|
||||
self._handle_quota_error(error)
|
||||
|
||||
return req.body
|
||||
return body
|
||||
|
||||
def show(self, req, server_id, id):
|
||||
""" Return a single metadata item """
|
||||
|
@ -17,24 +17,20 @@ import base64
|
||||
import traceback
|
||||
|
||||
from webob import exc
|
||||
from xml.dom import minidom
|
||||
|
||||
from nova import compute
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
import nova.image
|
||||
from nova import log as logging
|
||||
from nova import quota
|
||||
from nova import utils
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import create_instance_helper as helper
|
||||
from nova.api.openstack import faults
|
||||
import nova.api.openstack.views.addresses
|
||||
import nova.api.openstack.views.flavors
|
||||
import nova.api.openstack.views.images
|
||||
import nova.api.openstack.views.servers
|
||||
from nova.api.openstack import wsgi
|
||||
from nova.auth import manager as auth_manager
|
||||
from nova.compute import instance_types
|
||||
import nova.api.openstack
|
||||
from nova.scheduler import api as scheduler_api
|
||||
|
||||
@ -48,7 +44,7 @@ class Controller(object):
|
||||
|
||||
def __init__(self):
|
||||
self.compute_api = compute.API()
|
||||
self._image_service = utils.import_object(FLAGS.image_service)
|
||||
self.helper = helper.CreateInstanceHelper(self)
|
||||
|
||||
def index(self, req):
|
||||
""" Returns a list of server names and ids for a given user """
|
||||
@ -66,12 +62,6 @@ class Controller(object):
|
||||
return exc.HTTPBadRequest(str(err))
|
||||
return servers
|
||||
|
||||
def _image_ref_from_req_data(self, data):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _flavor_id_from_req_data(self, data):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _get_view_builder(self, req):
|
||||
raise NotImplementedError()
|
||||
|
||||
@ -86,7 +76,10 @@ class Controller(object):
|
||||
|
||||
builder - the response model builder
|
||||
"""
|
||||
instance_list = self.compute_api.get_all(req.environ['nova.context'])
|
||||
reservation_id = req.str_GET.get('reservation_id')
|
||||
instance_list = self.compute_api.get_all(
|
||||
req.environ['nova.context'],
|
||||
reservation_id=reservation_id)
|
||||
limited_list = self._limit_items(instance_list, req)
|
||||
builder = self._get_view_builder(req)
|
||||
servers = [builder.build(inst, is_detail)['server']
|
||||
@ -115,128 +108,25 @@ class Controller(object):
|
||||
|
||||
def create(self, req, body):
|
||||
""" Creates a new server for a given user """
|
||||
if not body:
|
||||
return faults.Fault(exc.HTTPUnprocessableEntity())
|
||||
|
||||
context = req.environ['nova.context']
|
||||
|
||||
password = self._get_server_admin_password(body['server'])
|
||||
|
||||
key_name = None
|
||||
key_data = None
|
||||
key_pairs = auth_manager.AuthManager.get_key_pairs(context)
|
||||
if key_pairs:
|
||||
key_pair = key_pairs[0]
|
||||
key_name = key_pair['name']
|
||||
key_data = key_pair['public_key']
|
||||
|
||||
image_href = self._image_ref_from_req_data(body)
|
||||
extra_values = None
|
||||
result = None
|
||||
try:
|
||||
image_service, image_id = nova.image.get_image_service(image_href)
|
||||
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
|
||||
req, image_service, image_id)
|
||||
images = set([str(x['id']) for x in image_service.index(context)])
|
||||
assert str(image_id) in images
|
||||
except:
|
||||
msg = _("Cannot find requested image %s") % image_href
|
||||
return faults.Fault(exc.HTTPBadRequest(msg))
|
||||
extra_values, result = self.helper.create_instance(
|
||||
req, body, self.compute_api.create)
|
||||
except faults.Fault, f:
|
||||
return f
|
||||
|
||||
personality = body['server'].get('personality')
|
||||
instances = result
|
||||
|
||||
injected_files = []
|
||||
if personality:
|
||||
injected_files = self._get_injected_files(personality)
|
||||
|
||||
flavor_id = self._flavor_id_from_req_data(body)
|
||||
|
||||
if not 'name' in body['server']:
|
||||
msg = _("Server name is not defined")
|
||||
return exc.HTTPBadRequest(msg)
|
||||
|
||||
zone_blob = body['server'].get('blob')
|
||||
name = body['server']['name']
|
||||
self._validate_server_name(name)
|
||||
name = name.strip()
|
||||
|
||||
try:
|
||||
inst_type = \
|
||||
instance_types.get_instance_type_by_flavor_id(flavor_id)
|
||||
(inst,) = self.compute_api.create(
|
||||
context,
|
||||
inst_type,
|
||||
image_href,
|
||||
kernel_id=kernel_id,
|
||||
ramdisk_id=ramdisk_id,
|
||||
display_name=name,
|
||||
display_description=name,
|
||||
key_name=key_name,
|
||||
key_data=key_data,
|
||||
metadata=body['server'].get('metadata', {}),
|
||||
injected_files=injected_files,
|
||||
admin_password=password,
|
||||
zone_blob=zone_blob)
|
||||
except quota.QuotaError as error:
|
||||
self._handle_quota_error(error)
|
||||
except exception.ImageNotFound as error:
|
||||
msg = _("Can not find requested image")
|
||||
return faults.Fault(exc.HTTPBadRequest(msg))
|
||||
|
||||
inst['instance_type'] = inst_type
|
||||
inst['image_ref'] = image_href
|
||||
(inst, ) = instances
|
||||
for key in ['instance_type', 'image_ref']:
|
||||
inst[key] = extra_values[key]
|
||||
|
||||
builder = self._get_view_builder(req)
|
||||
server = builder.build(inst, is_detail=True)
|
||||
server['server']['adminPass'] = password
|
||||
server['server']['adminPass'] = extra_values['password']
|
||||
return server
|
||||
|
||||
def _get_injected_files(self, personality):
|
||||
"""
|
||||
Create a list of injected files from the personality attribute
|
||||
|
||||
At this time, injected_files must be formatted as a list of
|
||||
(file_path, file_content) pairs for compatibility with the
|
||||
underlying compute service.
|
||||
"""
|
||||
injected_files = []
|
||||
|
||||
for item in personality:
|
||||
try:
|
||||
path = item['path']
|
||||
contents = item['contents']
|
||||
except KeyError as key:
|
||||
expl = _('Bad personality format: missing %s') % key
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
except TypeError:
|
||||
expl = _('Bad personality format')
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
try:
|
||||
contents = base64.b64decode(contents)
|
||||
except TypeError:
|
||||
expl = _('Personality content for %s cannot be decoded') % path
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
injected_files.append((path, contents))
|
||||
return injected_files
|
||||
|
||||
def _handle_quota_error(self, error):
|
||||
"""
|
||||
Reraise quota errors as api-specific http exceptions
|
||||
"""
|
||||
if error.code == "OnsetFileLimitExceeded":
|
||||
expl = _("Personality file limit exceeded")
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
if error.code == "OnsetFilePathLimitExceeded":
|
||||
expl = _("Personality file path too long")
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
if error.code == "OnsetFileContentLimitExceeded":
|
||||
expl = _("Personality file content too long")
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
# if the original error is okay, just reraise it
|
||||
raise error
|
||||
|
||||
def _get_server_admin_password(self, server):
|
||||
""" Determine the admin password for a server on creation """
|
||||
return utils.generate_password(16)
|
||||
|
||||
@scheduler_api.redirect_handler
|
||||
def update(self, req, id, body):
|
||||
""" Updates the server name or password """
|
||||
@ -251,7 +141,7 @@ class Controller(object):
|
||||
|
||||
if 'name' in body['server']:
|
||||
name = body['server']['name']
|
||||
self._validate_server_name(name)
|
||||
self.helper._validate_server_name(name)
|
||||
update_dict['display_name'] = name.strip()
|
||||
|
||||
self._parse_update(ctxt, id, body, update_dict)
|
||||
@ -263,15 +153,6 @@ class Controller(object):
|
||||
|
||||
return exc.HTTPNoContent()
|
||||
|
||||
def _validate_server_name(self, value):
|
||||
if not isinstance(value, basestring):
|
||||
msg = _("Server name is not a string or unicode")
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
|
||||
if value.strip() == '':
|
||||
msg = _("Server name is an empty string")
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
|
||||
def _parse_update(self, context, id, inst_dict, update_dict):
|
||||
pass
|
||||
|
||||
@ -520,45 +401,9 @@ class Controller(object):
|
||||
error=item.error))
|
||||
return dict(actions=actions)
|
||||
|
||||
def _get_kernel_ramdisk_from_image(self, req, image_service, image_id):
|
||||
"""Fetch an image from the ImageService, then if present, return the
|
||||
associated kernel and ramdisk image IDs.
|
||||
"""
|
||||
context = req.environ['nova.context']
|
||||
image_meta = image_service.show(context, image_id)
|
||||
# NOTE(sirp): extracted to a separate method to aid unit-testing, the
|
||||
# new method doesn't need a request obj or an ImageService stub
|
||||
return self._do_get_kernel_ramdisk_from_image(image_meta)
|
||||
|
||||
@staticmethod
|
||||
def _do_get_kernel_ramdisk_from_image(image_meta):
|
||||
"""Given an ImageService image_meta, return kernel and ramdisk image
|
||||
ids if present.
|
||||
|
||||
This is only valid for `ami` style images.
|
||||
"""
|
||||
image_id = image_meta['id']
|
||||
if image_meta['status'] != 'active':
|
||||
raise exception.ImageUnacceptable(image_id=image_id,
|
||||
reason=_("status is not active"))
|
||||
|
||||
if image_meta.get('container_format') != 'ami':
|
||||
return None, None
|
||||
|
||||
try:
|
||||
kernel_id = image_meta['properties']['kernel_id']
|
||||
except KeyError:
|
||||
raise exception.KernelNotFoundForImage(image_id=image_id)
|
||||
|
||||
try:
|
||||
ramdisk_id = image_meta['properties']['ramdisk_id']
|
||||
except KeyError:
|
||||
raise exception.RamdiskNotFoundForImage(image_id=image_id)
|
||||
|
||||
return kernel_id, ramdisk_id
|
||||
|
||||
|
||||
class ControllerV10(Controller):
|
||||
|
||||
def _image_ref_from_req_data(self, data):
|
||||
return data['server']['imageId']
|
||||
|
||||
@ -615,6 +460,10 @@ class ControllerV10(Controller):
|
||||
response.empty_body = True
|
||||
return response
|
||||
|
||||
def _get_server_admin_password(self, server):
|
||||
""" Determine the admin password for a server on creation """
|
||||
return self.helper._get_server_admin_password_old_style(server)
|
||||
|
||||
|
||||
class ControllerV11(Controller):
|
||||
def _image_ref_from_req_data(self, data):
|
||||
@ -724,92 +573,12 @@ class ControllerV11(Controller):
|
||||
response.empty_body = True
|
||||
return response
|
||||
|
||||
def get_default_xmlns(self, req):
|
||||
return common.XML_NS_V11
|
||||
|
||||
def _get_server_admin_password(self, server):
|
||||
""" Determine the admin password for a server on creation """
|
||||
password = server.get('adminPass')
|
||||
if password is None:
|
||||
return utils.generate_password(16)
|
||||
if not isinstance(password, basestring) or password == '':
|
||||
msg = _("Invalid adminPass")
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
return password
|
||||
|
||||
|
||||
class ServerXMLDeserializer(wsgi.XMLDeserializer):
|
||||
"""
|
||||
Deserializer to handle xml-formatted server create requests.
|
||||
|
||||
Handles standard server attributes as well as optional metadata
|
||||
and personality attributes
|
||||
"""
|
||||
|
||||
def create(self, string):
|
||||
"""Deserialize an xml-formatted server create request"""
|
||||
dom = minidom.parseString(string)
|
||||
server = self._extract_server(dom)
|
||||
return {'server': server}
|
||||
|
||||
def _extract_server(self, node):
|
||||
"""Marshal the server attribute of a parsed request"""
|
||||
server = {}
|
||||
server_node = self._find_first_child_named(node, 'server')
|
||||
for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]:
|
||||
if server_node.getAttribute(attr):
|
||||
server[attr] = server_node.getAttribute(attr)
|
||||
metadata = self._extract_metadata(server_node)
|
||||
if metadata is not None:
|
||||
server["metadata"] = metadata
|
||||
personality = self._extract_personality(server_node)
|
||||
if personality is not None:
|
||||
server["personality"] = personality
|
||||
return server
|
||||
|
||||
def _extract_metadata(self, server_node):
|
||||
"""Marshal the metadata attribute of a parsed request"""
|
||||
metadata_node = self._find_first_child_named(server_node, "metadata")
|
||||
if metadata_node is None:
|
||||
return None
|
||||
metadata = {}
|
||||
for meta_node in self._find_children_named(metadata_node, "meta"):
|
||||
key = meta_node.getAttribute("key")
|
||||
metadata[key] = self._extract_text(meta_node)
|
||||
return metadata
|
||||
|
||||
def _extract_personality(self, server_node):
|
||||
"""Marshal the personality attribute of a parsed request"""
|
||||
personality_node = \
|
||||
self._find_first_child_named(server_node, "personality")
|
||||
if personality_node is None:
|
||||
return None
|
||||
personality = []
|
||||
for file_node in self._find_children_named(personality_node, "file"):
|
||||
item = {}
|
||||
if file_node.hasAttribute("path"):
|
||||
item["path"] = file_node.getAttribute("path")
|
||||
item["contents"] = self._extract_text(file_node)
|
||||
personality.append(item)
|
||||
return personality
|
||||
|
||||
def _find_first_child_named(self, parent, name):
|
||||
"""Search a nodes children for the first child with a given name"""
|
||||
for node in parent.childNodes:
|
||||
if node.nodeName == name:
|
||||
return node
|
||||
return None
|
||||
|
||||
def _find_children_named(self, parent, name):
|
||||
"""Return all of a nodes children who have the given name"""
|
||||
for node in parent.childNodes:
|
||||
if node.nodeName == name:
|
||||
yield node
|
||||
|
||||
def _extract_text(self, node):
|
||||
"""Get the text field contained by the given node"""
|
||||
if len(node.childNodes) == 1:
|
||||
child = node.childNodes[0]
|
||||
if child.nodeType == child.TEXT_NODE:
|
||||
return child.nodeValue
|
||||
return ""
|
||||
return self.helper._get_server_admin_password_new_style(server)
|
||||
|
||||
|
||||
def create_resource(version='1.0'):
|
||||
@ -845,7 +614,7 @@ def create_resource(version='1.0'):
|
||||
}
|
||||
|
||||
deserializers = {
|
||||
'application/xml': ServerXMLDeserializer(),
|
||||
'application/xml': helper.ServerXMLDeserializer(),
|
||||
}
|
||||
|
||||
return wsgi.Resource(controller, serializers=serializers,
|
||||
|
@ -42,12 +42,15 @@ class ViewBuilder(object):
|
||||
|
||||
def build(self, inst, is_detail):
|
||||
"""Return a dict that represenst a server."""
|
||||
if is_detail:
|
||||
server = self._build_detail(inst)
|
||||
if inst.get('_is_precooked', False):
|
||||
server = dict(server=inst)
|
||||
else:
|
||||
server = self._build_simple(inst)
|
||||
if is_detail:
|
||||
server = self._build_detail(inst)
|
||||
else:
|
||||
server = self._build_simple(inst)
|
||||
|
||||
self._build_extra(server, inst)
|
||||
self._build_extra(server, inst)
|
||||
|
||||
return server
|
||||
|
||||
@ -79,6 +82,7 @@ class ViewBuilder(object):
|
||||
|
||||
ctxt = nova.context.get_admin_context()
|
||||
compute_api = nova.compute.API()
|
||||
|
||||
if compute_api.has_finished_migration(ctxt, inst['id']):
|
||||
inst_dict['status'] = 'RESIZE-CONFIRM'
|
||||
|
||||
|
@ -2,7 +2,9 @@
|
||||
import json
|
||||
import webob
|
||||
from xml.dom import minidom
|
||||
from xml.parsers import expat
|
||||
|
||||
import faults
|
||||
from nova import exception
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
@ -60,7 +62,7 @@ class TextDeserializer(object):
|
||||
|
||||
def deserialize(self, datastring, action='default'):
|
||||
"""Find local deserialization method and parse request body."""
|
||||
action_method = getattr(self, action, self.default)
|
||||
action_method = getattr(self, str(action), self.default)
|
||||
return action_method(datastring)
|
||||
|
||||
def default(self, datastring):
|
||||
@ -71,7 +73,11 @@ class TextDeserializer(object):
|
||||
class JSONDeserializer(TextDeserializer):
|
||||
|
||||
def default(self, datastring):
|
||||
return utils.loads(datastring)
|
||||
try:
|
||||
return utils.loads(datastring)
|
||||
except ValueError:
|
||||
raise exception.MalformedRequestBody(
|
||||
reason=_("malformed JSON in request body"))
|
||||
|
||||
|
||||
class XMLDeserializer(TextDeserializer):
|
||||
@ -86,8 +92,13 @@ class XMLDeserializer(TextDeserializer):
|
||||
|
||||
def default(self, datastring):
|
||||
plurals = set(self.metadata.get('plurals', {}))
|
||||
node = minidom.parseString(datastring).childNodes[0]
|
||||
return {node.nodeName: self._from_xml_node(node, plurals)}
|
||||
|
||||
try:
|
||||
node = minidom.parseString(datastring).childNodes[0]
|
||||
return {node.nodeName: self._from_xml_node(node, plurals)}
|
||||
except expat.ExpatError:
|
||||
raise exception.MalformedRequestBody(
|
||||
reason=_("malformed XML in request body"))
|
||||
|
||||
def _from_xml_node(self, node, listnames):
|
||||
"""Convert a minidom node to a simple Python type.
|
||||
@ -189,7 +200,7 @@ class DictSerializer(object):
|
||||
|
||||
def serialize(self, data, action='default'):
|
||||
"""Find local serialization method and encode response body."""
|
||||
action_method = getattr(self, action, self.default)
|
||||
action_method = getattr(self, str(action), self.default)
|
||||
return action_method(data)
|
||||
|
||||
def default(self, data):
|
||||
@ -296,7 +307,7 @@ class ResponseSerializer(object):
|
||||
}
|
||||
self.serializers.update(serializers or {})
|
||||
|
||||
def serialize(self, response_data, content_type):
|
||||
def serialize(self, response_data, content_type, action='default'):
|
||||
"""Serialize a dict into a string and wrap in a wsgi.Request object.
|
||||
|
||||
:param response_data: dict produced by the Controller
|
||||
@ -307,7 +318,7 @@ class ResponseSerializer(object):
|
||||
response.headers['Content-Type'] = content_type
|
||||
|
||||
serializer = self.get_serializer(content_type)
|
||||
response.body = serializer.serialize(response_data)
|
||||
response.body = serializer.serialize(response_data, action)
|
||||
|
||||
return response
|
||||
|
||||
@ -353,21 +364,25 @@ class Resource(wsgi.Application):
|
||||
request)
|
||||
except exception.InvalidContentType:
|
||||
return webob.exc.HTTPBadRequest(_("Unsupported Content-Type"))
|
||||
except exception.MalformedRequestBody:
|
||||
explanation = _("Malformed request body")
|
||||
return faults.Fault(webob.exc.HTTPBadRequest(
|
||||
explanation=explanation))
|
||||
|
||||
action_result = self.dispatch(request, action, action_args)
|
||||
|
||||
#TODO(bcwaldon): find a more elegant way to pass through non-dict types
|
||||
if type(action_result) is dict:
|
||||
response = self.serializer.serialize(action_result, accept)
|
||||
response = self.serializer.serialize(action_result, accept, action)
|
||||
else:
|
||||
response = action_result
|
||||
|
||||
try:
|
||||
msg_dict = dict(url=request.url, status=response.status_int)
|
||||
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
|
||||
except AttributeError:
|
||||
msg_dict = dict(url=request.url)
|
||||
msg = _("%(url)s returned a fault")
|
||||
except AttributeError, e:
|
||||
msg_dict = dict(url=request.url, e=e)
|
||||
msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
|
||||
|
||||
LOG.debug(msg)
|
||||
|
||||
|
@ -21,10 +21,15 @@ from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
from nova.compute import api as compute
|
||||
from nova.scheduler import api
|
||||
|
||||
from nova.api.openstack import create_instance_helper as helper
|
||||
from nova.api.openstack import common
|
||||
from nova.api.openstack import faults
|
||||
from nova.api.openstack import wsgi
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
@ -59,6 +64,11 @@ def check_encryption_key(func):
|
||||
|
||||
|
||||
class Controller(object):
|
||||
"""Controller for Zone resources."""
|
||||
|
||||
def __init__(self):
|
||||
self.compute_api = compute.API()
|
||||
self.helper = helper.CreateInstanceHelper(self)
|
||||
|
||||
def index(self, req):
|
||||
"""Return all zones in brief"""
|
||||
@ -93,21 +103,39 @@ class Controller(object):
|
||||
return dict(zone=_scrub_zone(zone))
|
||||
|
||||
def delete(self, req, id):
|
||||
"""Delete a child zone entry."""
|
||||
zone_id = int(id)
|
||||
api.zone_delete(req.environ['nova.context'], zone_id)
|
||||
return {}
|
||||
|
||||
def create(self, req, body):
|
||||
"""Create a child zone entry."""
|
||||
context = req.environ['nova.context']
|
||||
zone = api.zone_create(context, body["zone"])
|
||||
return dict(zone=_scrub_zone(zone))
|
||||
|
||||
def update(self, req, id, body):
|
||||
"""Update a child zone entry."""
|
||||
context = req.environ['nova.context']
|
||||
zone_id = int(id)
|
||||
zone = api.zone_update(context, zone_id, body["zone"])
|
||||
return dict(zone=_scrub_zone(zone))
|
||||
|
||||
def boot(self, req, body):
|
||||
"""Creates a new server for a given user while being Zone aware.
|
||||
|
||||
Returns a reservation ID (a UUID).
|
||||
"""
|
||||
result = None
|
||||
try:
|
||||
extra_values, result = self.helper.create_instance(req, body,
|
||||
self.compute_api.create_all_at_once)
|
||||
except faults.Fault, f:
|
||||
return f
|
||||
|
||||
reservation_id = result
|
||||
return {'reservation_id': reservation_id}
|
||||
|
||||
@check_encryption_key
|
||||
def select(self, req, body):
|
||||
"""Returns a weighted list of costs to create instances
|
||||
@ -131,8 +159,37 @@ class Controller(object):
|
||||
blob=cipher_text))
|
||||
return cooked
|
||||
|
||||
def _image_ref_from_req_data(self, data):
|
||||
return data['server']['imageId']
|
||||
|
||||
def _flavor_id_from_req_data(self, data):
|
||||
return data['server']['flavorId']
|
||||
|
||||
def _get_server_admin_password(self, server):
|
||||
""" Determine the admin password for a server on creation """
|
||||
return self.helper._get_server_admin_password_old_style(server)
|
||||
|
||||
|
||||
class ControllerV11(object):
|
||||
"""Controller for 1.1 Zone resources."""
|
||||
|
||||
def _get_server_admin_password(self, server):
|
||||
""" Determine the admin password for a server on creation """
|
||||
return self.helper._get_server_admin_password_new_style(server)
|
||||
|
||||
def _image_ref_from_req_data(self, data):
|
||||
return data['server']['imageRef']
|
||||
|
||||
def _flavor_id_from_req_data(self, data):
|
||||
return data['server']['flavorRef']
|
||||
|
||||
|
||||
def create_resource(version):
|
||||
controller = {
|
||||
'1.0': Controller,
|
||||
'1.1': ControllerV11,
|
||||
}[version]()
|
||||
|
||||
def create_resource():
|
||||
metadata = {
|
||||
"attributes": {
|
||||
"zone": ["id", "api_url", "name", "capabilities"],
|
||||
@ -144,4 +201,9 @@ def create_resource():
|
||||
metadata=metadata),
|
||||
}
|
||||
|
||||
return wsgi.Resource(Controller(), serializers=serializers)
|
||||
deserializers = {
|
||||
'application/xml': helper.ServerXMLDeserializer(),
|
||||
}
|
||||
|
||||
return wsgi.Resource(controller, serializers=serializers,
|
||||
deserializers=deserializers)
|
||||
|
@ -134,7 +134,8 @@ class API(base.Base):
|
||||
display_name='', display_description='',
|
||||
key_name=None, key_data=None, security_group='default',
|
||||
availability_zone=None, user_data=None, metadata={},
|
||||
injected_files=None, admin_password=None, zone_blob=None):
|
||||
injected_files=None, admin_password=None, zone_blob=None,
|
||||
reservation_id=None):
|
||||
"""Verify all the input parameters regardless of the provisioning
|
||||
strategy being performed."""
|
||||
|
||||
@ -164,6 +165,9 @@ class API(base.Base):
|
||||
os_type = None
|
||||
if 'properties' in image and 'os_type' in image['properties']:
|
||||
os_type = image['properties']['os_type']
|
||||
vm_mode = None
|
||||
if 'properties' in image and 'vm_mode' in image['properties']:
|
||||
vm_mode = image['properties']['vm_mode']
|
||||
|
||||
if kernel_id is None:
|
||||
kernel_id = image['properties'].get('kernel_id', None)
|
||||
@ -200,8 +204,11 @@ class API(base.Base):
|
||||
key_pair = db.key_pair_get(context, context.user_id, key_name)
|
||||
key_data = key_pair['public_key']
|
||||
|
||||
if reservation_id is None:
|
||||
reservation_id = utils.generate_uid('r')
|
||||
|
||||
base_options = {
|
||||
'reservation_id': utils.generate_uid('r'),
|
||||
'reservation_id': reservation_id,
|
||||
'image_ref': image_href,
|
||||
'kernel_id': kernel_id or '',
|
||||
'ramdisk_id': ramdisk_id or '',
|
||||
@ -222,7 +229,8 @@ class API(base.Base):
|
||||
'locked': False,
|
||||
'metadata': metadata,
|
||||
'availability_zone': availability_zone,
|
||||
'os_type': os_type}
|
||||
'os_type': os_type,
|
||||
'vm_mode': vm_mode}
|
||||
|
||||
return (num_instances, base_options, security_groups)
|
||||
|
||||
@ -281,7 +289,7 @@ class API(base.Base):
|
||||
'instance_type': instance_type,
|
||||
'filter': filter_class,
|
||||
'blob': zone_blob,
|
||||
'num_instances': num_instances
|
||||
'num_instances': num_instances,
|
||||
}
|
||||
|
||||
rpc.cast(context,
|
||||
@ -300,7 +308,8 @@ class API(base.Base):
|
||||
display_name='', display_description='',
|
||||
key_name=None, key_data=None, security_group='default',
|
||||
availability_zone=None, user_data=None, metadata={},
|
||||
injected_files=None, admin_password=None, zone_blob=None):
|
||||
injected_files=None, admin_password=None, zone_blob=None,
|
||||
reservation_id=None):
|
||||
"""Provision the instances by passing the whole request to
|
||||
the Scheduler for execution. Returns a Reservation ID
|
||||
related to the creation of all of these instances."""
|
||||
@ -312,7 +321,8 @@ class API(base.Base):
|
||||
display_name, display_description,
|
||||
key_name, key_data, security_group,
|
||||
availability_zone, user_data, metadata,
|
||||
injected_files, admin_password, zone_blob)
|
||||
injected_files, admin_password, zone_blob,
|
||||
reservation_id)
|
||||
|
||||
self._ask_scheduler_to_create_instance(context, base_options,
|
||||
instance_type, zone_blob,
|
||||
@ -328,7 +338,8 @@ class API(base.Base):
|
||||
display_name='', display_description='',
|
||||
key_name=None, key_data=None, security_group='default',
|
||||
availability_zone=None, user_data=None, metadata={},
|
||||
injected_files=None, admin_password=None, zone_blob=None):
|
||||
injected_files=None, admin_password=None, zone_blob=None,
|
||||
reservation_id=None):
|
||||
"""
|
||||
Provision the instances by sending off a series of single
|
||||
instance requests to the Schedulers. This is fine for trival
|
||||
@ -346,7 +357,8 @@ class API(base.Base):
|
||||
display_name, display_description,
|
||||
key_name, key_data, security_group,
|
||||
availability_zone, user_data, metadata,
|
||||
injected_files, admin_password, zone_blob)
|
||||
injected_files, admin_password, zone_blob,
|
||||
reservation_id)
|
||||
|
||||
instances = []
|
||||
LOG.debug(_("Going to run %s instances..."), num_instances)
|
||||
@ -510,6 +522,24 @@ class API(base.Base):
|
||||
"""
|
||||
return self.get(context, instance_id)
|
||||
|
||||
def get_all_across_zones(self, context, reservation_id):
|
||||
"""Get all instances with this reservation_id, across
|
||||
all available Zones (if any).
|
||||
"""
|
||||
instances = self.db.instance_get_all_by_reservation(
|
||||
context, reservation_id)
|
||||
|
||||
children = scheduler_api.call_zone_method(context, "list",
|
||||
novaclient_collection_name="servers",
|
||||
reservation_id=reservation_id)
|
||||
|
||||
for zone, servers in children:
|
||||
for server in servers:
|
||||
# Results are ready to send to user. No need to scrub.
|
||||
server._info['_is_precooked'] = True
|
||||
instances.append(server._info)
|
||||
return instances
|
||||
|
||||
def get_all(self, context, project_id=None, reservation_id=None,
|
||||
fixed_ip=None):
|
||||
"""Get all instances filtered by one of the given parameters.
|
||||
@ -518,8 +548,7 @@ class API(base.Base):
|
||||
all instances in the system.
|
||||
"""
|
||||
if reservation_id is not None:
|
||||
return self.db.instance_get_all_by_reservation(
|
||||
context, reservation_id)
|
||||
return self.get_all_across_zones(context, reservation_id)
|
||||
|
||||
if fixed_ip is not None:
|
||||
return self.db.fixed_ip_get_instance(context, fixed_ip)
|
||||
|
@ -176,7 +176,8 @@ def revoke_certs_by_project(project_id):
|
||||
def revoke_certs_by_user_and_project(user_id, project_id):
|
||||
"""Revoke certs for user in project."""
|
||||
admin = context.get_admin_context()
|
||||
for cert in db.certificate_get_all_by_user(admin, user_id, project_id):
|
||||
for cert in db.certificate_get_all_by_user_and_project(admin,
|
||||
user_id, project_id):
|
||||
revoke_cert(cert['project_id'], cert['file_name'])
|
||||
|
||||
|
||||
|
@ -907,6 +907,7 @@ def instance_get_all_by_host(context, host):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('metadata')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(host=host).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
@ -922,6 +923,7 @@ def instance_get_all_by_project(context, project_id):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('metadata')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(project_id=project_id).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
@ -937,6 +939,7 @@ def instance_get_all_by_reservation(context, reservation_id):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('metadata')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(reservation_id=reservation_id).\
|
||||
filter_by(deleted=can_read_deleted(context)).\
|
||||
@ -946,6 +949,7 @@ def instance_get_all_by_reservation(context, reservation_id):
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('metadata')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(project_id=context.project_id).\
|
||||
filter_by(reservation_id=reservation_id).\
|
||||
@ -959,6 +963,8 @@ def instance_get_project_vpn(context, project_id):
|
||||
return session.query(models.Instance).\
|
||||
options(joinedload_all('fixed_ip.floating_ips')).\
|
||||
options(joinedload('security_groups')).\
|
||||
options(joinedload_all('fixed_ip.network')).\
|
||||
options(joinedload('metadata')).\
|
||||
options(joinedload('instance_type')).\
|
||||
filter_by(project_id=project_id).\
|
||||
filter_by(image_ref=str(FLAGS.vpn_image_id)).\
|
||||
|
@ -0,0 +1,45 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy import Column, Integer, MetaData, String, Table
|
||||
|
||||
meta = MetaData()
|
||||
|
||||
instances_vm_mode = Column('vm_mode',
|
||||
String(length=255, convert_unicode=False,
|
||||
assert_unicode=None, unicode_error=None,
|
||||
_warn_on_bytestring=False),
|
||||
nullable=True)
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
|
||||
# Upgrade operations go here. Don't create your own engine;
|
||||
# bind migrate_engine to your metadata
|
||||
meta.bind = migrate_engine
|
||||
|
||||
instances = Table('instances', meta, autoload=True,
|
||||
autoload_with=migrate_engine)
|
||||
|
||||
instances.create_column(instances_vm_mode)
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
|
||||
meta.bind = migrate_engine
|
||||
|
||||
instances = Table('instances', meta, autoload=True,
|
||||
autoload_with=migrate_engine)
|
||||
|
||||
instances.drop_column('vm_mode')
|
@ -232,6 +232,7 @@ class Instance(BASE, NovaBase):
|
||||
locked = Column(Boolean)
|
||||
|
||||
os_type = Column(String(255))
|
||||
vm_mode = Column(String(255))
|
||||
|
||||
# TODO(vish): see Ewan's email about state improvements, probably
|
||||
# should be in a driver base class or some such
|
||||
|
@ -585,3 +585,7 @@ class InstanceExists(Duplicate):
|
||||
|
||||
class MigrationError(NovaException):
|
||||
message = _("Migration error") + ": %(reason)s"
|
||||
|
||||
|
||||
class MalformedRequestBody(NovaException):
|
||||
message = _("Malformed message body: %(reason)s")
|
||||
|
@ -272,7 +272,7 @@ DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
|
||||
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
|
||||
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
|
||||
DEFINE_list('glance_api_servers',
|
||||
['127.0.0.1:9292'],
|
||||
['%s:9292' % _get_my_ip()],
|
||||
'list of glance api servers available to nova (host:port)')
|
||||
DEFINE_integer('s3_port', 3333, 's3 port')
|
||||
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
|
||||
|
@ -106,12 +106,14 @@ def _wrap_method(function, self):
|
||||
def _process(func, zone):
|
||||
"""Worker stub for green thread pool. Give the worker
|
||||
an authenticated nova client and zone info."""
|
||||
nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
|
||||
nova = novaclient.OpenStack(zone.username, zone.password, None,
|
||||
zone.api_url)
|
||||
nova.authenticate()
|
||||
return func(nova, zone)
|
||||
|
||||
|
||||
def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
|
||||
def call_zone_method(context, method_name, errors_to_ignore=None,
|
||||
novaclient_collection_name='zones', *args, **kwargs):
|
||||
"""Returns a list of (zone, call_result) objects."""
|
||||
if not isinstance(errors_to_ignore, (list, tuple)):
|
||||
# This will also handle the default None
|
||||
@ -121,7 +123,7 @@ def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
|
||||
results = []
|
||||
for zone in db.zone_get_all(context):
|
||||
try:
|
||||
nova = novaclient.OpenStack(zone.username, zone.password,
|
||||
nova = novaclient.OpenStack(zone.username, zone.password, None,
|
||||
zone.api_url)
|
||||
nova.authenticate()
|
||||
except novaclient.exceptions.BadRequest, e:
|
||||
@ -131,18 +133,16 @@ def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
|
||||
#TODO (dabo) - add logic for failure counts per zone,
|
||||
# with escalation after a given number of failures.
|
||||
continue
|
||||
zone_method = getattr(nova.zones, method)
|
||||
novaclient_collection = getattr(nova, novaclient_collection_name)
|
||||
collection_method = getattr(novaclient_collection, method_name)
|
||||
|
||||
def _error_trap(*args, **kwargs):
|
||||
try:
|
||||
return zone_method(*args, **kwargs)
|
||||
return collection_method(*args, **kwargs)
|
||||
except Exception as e:
|
||||
if type(e) in errors_to_ignore:
|
||||
return None
|
||||
# TODO (dabo) - want to be able to re-raise here.
|
||||
# Returning a string now; raising was causing issues.
|
||||
# raise e
|
||||
return "ERROR", "%s" % e
|
||||
raise
|
||||
|
||||
res = pool.spawn(_error_trap, *args, **kwargs)
|
||||
results.append((zone, res))
|
||||
|
@ -89,8 +89,8 @@ class SchedulerManager(manager.Manager):
|
||||
host = getattr(self.driver, driver_method)(elevated, *args,
|
||||
**kwargs)
|
||||
except AttributeError, e:
|
||||
LOG.exception(_("Driver Method %(driver_method)s missing: %(e)s")
|
||||
% locals())
|
||||
LOG.warning(_("Driver Method %(driver_method)s missing: %(e)s."
|
||||
"Reverting to schedule()") % locals())
|
||||
host = self.driver.schedule(elevated, topic, *args, **kwargs)
|
||||
|
||||
if not host:
|
||||
|
@ -88,9 +88,10 @@ class ZoneAwareScheduler(driver.Scheduler):
|
||||
instance_properties = request_spec['instance_properties']
|
||||
|
||||
name = instance_properties['display_name']
|
||||
image_id = instance_properties['image_id']
|
||||
image_ref = instance_properties['image_ref']
|
||||
meta = instance_properties['metadata']
|
||||
flavor_id = instance_type['flavorid']
|
||||
reservation_id = instance_properties['reservation_id']
|
||||
|
||||
files = kwargs['injected_files']
|
||||
ipgroup = None # Not supported in OS API ... yet
|
||||
@ -99,18 +100,20 @@ class ZoneAwareScheduler(driver.Scheduler):
|
||||
child_blob = zone_info['child_blob']
|
||||
zone = db.zone_get(context, child_zone)
|
||||
url = zone.api_url
|
||||
LOG.debug(_("Forwarding instance create call to child zone %(url)s")
|
||||
LOG.debug(_("Forwarding instance create call to child zone %(url)s"
|
||||
". ReservationID=%(reservation_id)s")
|
||||
% locals())
|
||||
nova = None
|
||||
try:
|
||||
nova = novaclient.OpenStack(zone.username, zone.password, url)
|
||||
nova = novaclient.OpenStack(zone.username, zone.password, None,
|
||||
url)
|
||||
nova.authenticate()
|
||||
except novaclient.exceptions.BadRequest, e:
|
||||
raise exception.NotAuthorized(_("Bad credentials attempting "
|
||||
"to talk to zone at %(url)s.") % locals())
|
||||
|
||||
nova.servers.create(name, image_id, flavor_id, ipgroup, meta, files,
|
||||
child_blob)
|
||||
nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
|
||||
child_blob, reservation_id=reservation_id)
|
||||
|
||||
def _provision_resource_from_blob(self, context, item, instance_id,
|
||||
request_spec, kwargs):
|
||||
@ -182,7 +185,11 @@ class ZoneAwareScheduler(driver.Scheduler):
|
||||
if not build_plan:
|
||||
raise driver.NoValidHost(_('No hosts were available'))
|
||||
|
||||
for item in build_plan:
|
||||
for num in xrange(request_spec['num_instances']):
|
||||
if not build_plan:
|
||||
break
|
||||
|
||||
item = build_plan.pop(0)
|
||||
self._provision_resource(context, item, instance_id, request_spec,
|
||||
kwargs)
|
||||
|
||||
|
@ -89,7 +89,8 @@ class ZoneState(object):
|
||||
|
||||
def _call_novaclient(zone):
|
||||
"""Call novaclient. Broken out for testing purposes."""
|
||||
client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
|
||||
client = novaclient.OpenStack(zone.username, zone.password, None,
|
||||
zone.api_url)
|
||||
return client.zones.info()._info
|
||||
|
||||
|
||||
|
@ -15,6 +15,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
import webob.exc
|
||||
import webob.dec
|
||||
|
||||
@ -23,6 +25,7 @@ from webob import Request
|
||||
from nova import test
|
||||
from nova.api import openstack
|
||||
from nova.api.openstack import faults
|
||||
from nova.tests.api.openstack import fakes
|
||||
|
||||
|
||||
class APITest(test.TestCase):
|
||||
@ -31,6 +34,24 @@ class APITest(test.TestCase):
|
||||
# simpler version of the app than fakes.wsgi_app
|
||||
return openstack.FaultWrapper(inner_app)
|
||||
|
||||
def test_malformed_json(self):
|
||||
req = webob.Request.blank('/')
|
||||
req.method = 'POST'
|
||||
req.body = '{'
|
||||
req.headers["content-type"] = "application/json"
|
||||
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_malformed_xml(self):
|
||||
req = webob.Request.blank('/')
|
||||
req.method = 'POST'
|
||||
req.body = '<hi im not xml>'
|
||||
req.headers["content-type"] = "application/xml"
|
||||
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_exceptions_are_converted_to_faults(self):
|
||||
|
||||
@webob.dec.wsgify
|
||||
|
@ -89,6 +89,7 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
self.assertEqual('value1', res_dict['metadata']['key1'])
|
||||
|
||||
def test_index_no_data(self):
|
||||
@ -99,6 +100,7 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
self.assertEqual(0, len(res_dict['metadata']))
|
||||
|
||||
def test_show(self):
|
||||
@ -109,6 +111,7 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
self.assertEqual('value5', res_dict['key5'])
|
||||
|
||||
def test_show_meta_not_found(self):
|
||||
@ -140,8 +143,19 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
self.assertEqual('value1', res_dict['metadata']['key1'])
|
||||
|
||||
def test_create_empty_body(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
|
||||
return_create_instance_metadata)
|
||||
req = webob.Request.blank('/v1.1/servers/1/meta')
|
||||
req.environ['api.version'] = '1.1'
|
||||
req.method = 'POST'
|
||||
req.headers["content-type"] = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(400, res.status_int)
|
||||
|
||||
def test_update_item(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
|
||||
return_create_instance_metadata)
|
||||
@ -152,9 +166,20 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
req.headers["content-type"] = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual('value1', res_dict['key1'])
|
||||
|
||||
def test_update_item_empty_body(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
|
||||
return_create_instance_metadata)
|
||||
req = webob.Request.blank('/v1.1/servers/1/meta/key1')
|
||||
req.environ['api.version'] = '1.1'
|
||||
req.method = 'PUT'
|
||||
req.headers["content-type"] = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(400, res.status_int)
|
||||
|
||||
def test_update_item_too_many_keys(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
|
||||
return_create_instance_metadata)
|
||||
|
@ -31,10 +31,12 @@ from nova import test
|
||||
from nova import utils
|
||||
import nova.api.openstack
|
||||
from nova.api.openstack import servers
|
||||
from nova.api.openstack import create_instance_helper
|
||||
import nova.compute.api
|
||||
from nova.compute import instance_types
|
||||
from nova.compute import power_state
|
||||
import nova.db.api
|
||||
import nova.scheduler.api
|
||||
from nova.db.sqlalchemy.models import Instance
|
||||
from nova.db.sqlalchemy.models import InstanceMetadata
|
||||
import nova.image.fake
|
||||
@ -68,6 +70,34 @@ def return_servers(context, user_id=1):
|
||||
return [stub_instance(i, user_id) for i in xrange(5)]
|
||||
|
||||
|
||||
def return_servers_by_reservation(context, reservation_id=""):
|
||||
return [stub_instance(i, reservation_id) for i in xrange(5)]
|
||||
|
||||
|
||||
def return_servers_by_reservation_empty(context, reservation_id=""):
|
||||
return []
|
||||
|
||||
|
||||
def return_servers_from_child_zones_empty(*args, **kwargs):
|
||||
return []
|
||||
|
||||
|
||||
def return_servers_from_child_zones(*args, **kwargs):
|
||||
class Server(object):
|
||||
pass
|
||||
|
||||
zones = []
|
||||
for zone in xrange(3):
|
||||
servers = []
|
||||
for server_id in xrange(5):
|
||||
server = Server()
|
||||
server._info = stub_instance(server_id, reservation_id="child")
|
||||
servers.append(server)
|
||||
|
||||
zones.append(("Zone%d" % zone, servers))
|
||||
return zones
|
||||
|
||||
|
||||
def return_security_group(context, instance_id, security_group_id):
|
||||
pass
|
||||
|
||||
@ -81,7 +111,7 @@ def instance_address(context, instance_id):
|
||||
|
||||
|
||||
def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
|
||||
host=None, power_state=0):
|
||||
host=None, power_state=0, reservation_id=""):
|
||||
metadata = []
|
||||
metadata.append(InstanceMetadata(key='seq', value=id))
|
||||
|
||||
@ -93,6 +123,11 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
|
||||
if host is not None:
|
||||
host = str(host)
|
||||
|
||||
# ReservationID isn't sent back, hack it in there.
|
||||
server_name = "server%s" % id
|
||||
if reservation_id != "":
|
||||
server_name = "reservation_%s" % (reservation_id, )
|
||||
|
||||
instance = {
|
||||
"id": id,
|
||||
"admin_pass": "",
|
||||
@ -113,13 +148,13 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
|
||||
"host": host,
|
||||
"instance_type": dict(inst_type),
|
||||
"user_data": "",
|
||||
"reservation_id": "",
|
||||
"reservation_id": reservation_id,
|
||||
"mac_address": "",
|
||||
"scheduled_at": utils.utcnow(),
|
||||
"launched_at": utils.utcnow(),
|
||||
"terminated_at": utils.utcnow(),
|
||||
"availability_zone": "",
|
||||
"display_name": "server%s" % id,
|
||||
"display_name": server_name,
|
||||
"display_description": "",
|
||||
"locked": False,
|
||||
"metadata": metadata}
|
||||
@ -364,6 +399,57 @@ class ServersTest(test.TestCase):
|
||||
self.assertEqual(s.get('imageId', None), None)
|
||||
i += 1
|
||||
|
||||
def test_get_server_list_with_reservation_id(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_get_all_by_reservation',
|
||||
return_servers_by_reservation)
|
||||
self.stubs.Set(nova.scheduler.api, 'call_zone_method',
|
||||
return_servers_from_child_zones)
|
||||
req = webob.Request.blank('/v1.0/servers?reservation_id=foo')
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
|
||||
i = 0
|
||||
for s in res_dict['servers']:
|
||||
if '_is_precooked' in s:
|
||||
self.assertEqual(s.get('reservation_id'), 'child')
|
||||
else:
|
||||
self.assertEqual(s.get('name'), 'server%d' % i)
|
||||
i += 1
|
||||
|
||||
def test_get_server_list_with_reservation_id_empty(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_get_all_by_reservation',
|
||||
return_servers_by_reservation_empty)
|
||||
self.stubs.Set(nova.scheduler.api, 'call_zone_method',
|
||||
return_servers_from_child_zones_empty)
|
||||
req = webob.Request.blank('/v1.0/servers/detail?reservation_id=foo')
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
|
||||
i = 0
|
||||
for s in res_dict['servers']:
|
||||
if '_is_precooked' in s:
|
||||
self.assertEqual(s.get('reservation_id'), 'child')
|
||||
else:
|
||||
self.assertEqual(s.get('name'), 'server%d' % i)
|
||||
i += 1
|
||||
|
||||
def test_get_server_list_with_reservation_id_details(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_get_all_by_reservation',
|
||||
return_servers_by_reservation)
|
||||
self.stubs.Set(nova.scheduler.api, 'call_zone_method',
|
||||
return_servers_from_child_zones)
|
||||
req = webob.Request.blank('/v1.0/servers/detail?reservation_id=foo')
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
|
||||
i = 0
|
||||
for s in res_dict['servers']:
|
||||
if '_is_precooked' in s:
|
||||
self.assertEqual(s.get('reservation_id'), 'child')
|
||||
else:
|
||||
self.assertEqual(s.get('name'), 'server%d' % i)
|
||||
i += 1
|
||||
|
||||
def test_get_server_list_v1_1(self):
|
||||
req = webob.Request.blank('/v1.1/servers')
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
@ -483,7 +569,8 @@ class ServersTest(test.TestCase):
|
||||
self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
|
||||
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
|
||||
fake_method)
|
||||
self.stubs.Set(nova.api.openstack.servers.Controller,
|
||||
self.stubs.Set(
|
||||
nova.api.openstack.create_instance_helper.CreateInstanceHelper,
|
||||
"_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
|
||||
self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
|
||||
|
||||
@ -512,6 +599,48 @@ class ServersTest(test.TestCase):
|
||||
def test_create_instance(self):
|
||||
self._test_create_instance_helper()
|
||||
|
||||
def test_create_instance_via_zones(self):
|
||||
"""Server generated ReservationID"""
|
||||
self._setup_for_create_instance()
|
||||
FLAGS.allow_admin_api = True
|
||||
|
||||
body = dict(server=dict(
|
||||
name='server_test', imageId=3, flavorId=2,
|
||||
metadata={'hello': 'world', 'open': 'stack'},
|
||||
personality={}))
|
||||
req = webob.Request.blank('/v1.0/zones/boot')
|
||||
req.method = 'POST'
|
||||
req.body = json.dumps(body)
|
||||
req.headers["content-type"] = "application/json"
|
||||
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
reservation_id = json.loads(res.body)['reservation_id']
|
||||
self.assertEqual(res.status_int, 200)
|
||||
self.assertNotEqual(reservation_id, "")
|
||||
self.assertNotEqual(reservation_id, None)
|
||||
self.assertTrue(len(reservation_id) > 1)
|
||||
|
||||
def test_create_instance_via_zones_with_resid(self):
|
||||
"""User supplied ReservationID"""
|
||||
self._setup_for_create_instance()
|
||||
FLAGS.allow_admin_api = True
|
||||
|
||||
body = dict(server=dict(
|
||||
name='server_test', imageId=3, flavorId=2,
|
||||
metadata={'hello': 'world', 'open': 'stack'},
|
||||
personality={}, reservation_id='myresid'))
|
||||
req = webob.Request.blank('/v1.0/zones/boot')
|
||||
req.method = 'POST'
|
||||
req.body = json.dumps(body)
|
||||
req.headers["content-type"] = "application/json"
|
||||
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
|
||||
reservation_id = json.loads(res.body)['reservation_id']
|
||||
self.assertEqual(res.status_int, 200)
|
||||
self.assertEqual(reservation_id, "myresid")
|
||||
|
||||
def test_create_instance_no_key_pair(self):
|
||||
fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
|
||||
self._test_create_instance_helper()
|
||||
@ -1401,7 +1530,7 @@ class ServersTest(test.TestCase):
|
||||
class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.deserializer = servers.ServerXMLDeserializer()
|
||||
self.deserializer = create_instance_helper.ServerXMLDeserializer()
|
||||
|
||||
def test_minimal_request(self):
|
||||
serial_request = """
|
||||
@ -1733,7 +1862,8 @@ class TestServerInstanceCreation(test.TestCase):
|
||||
|
||||
compute_api = MockComputeAPI()
|
||||
self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api))
|
||||
self.stubs.Set(nova.api.openstack.servers.Controller,
|
||||
self.stubs.Set(
|
||||
nova.api.openstack.create_instance_helper.CreateInstanceHelper,
|
||||
'_get_kernel_ramdisk_from_image', make_stub_method((1, 1)))
|
||||
return compute_api
|
||||
|
||||
@ -1989,6 +2119,6 @@ class TestGetKernelRamdiskFromImage(test.TestCase):
|
||||
@staticmethod
|
||||
def _get_k_r(image_meta):
|
||||
"""Rebinding function to a shorter name for convenience"""
|
||||
kernel_id, ramdisk_id = \
|
||||
servers.Controller._do_get_kernel_ramdisk_from_image(image_meta)
|
||||
kernel_id, ramdisk_id = create_instance_helper.CreateInstanceHelper. \
|
||||
_do_get_kernel_ramdisk_from_image(image_meta)
|
||||
return kernel_id, ramdisk_id
|
||||
|
@ -89,6 +89,12 @@ class DictSerializerTest(test.TestCase):
|
||||
serializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(serializer.serialize({}, 'update'), 'trousers')
|
||||
|
||||
def test_dispatch_action_None(self):
|
||||
serializer = wsgi.DictSerializer()
|
||||
serializer.create = lambda x: 'pants'
|
||||
serializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(serializer.serialize({}, None), 'trousers')
|
||||
|
||||
|
||||
class XMLDictSerializerTest(test.TestCase):
|
||||
def test_xml(self):
|
||||
@ -123,6 +129,12 @@ class TextDeserializerTest(test.TestCase):
|
||||
deserializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers')
|
||||
|
||||
def test_dispatch_action_None(self):
|
||||
deserializer = wsgi.TextDeserializer()
|
||||
deserializer.create = lambda x: 'pants'
|
||||
deserializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(deserializer.deserialize({}, None), 'trousers')
|
||||
|
||||
|
||||
class JSONDeserializerTest(test.TestCase):
|
||||
def test_json(self):
|
||||
@ -171,11 +183,11 @@ class XMLDeserializerTest(test.TestCase):
|
||||
class ResponseSerializerTest(test.TestCase):
|
||||
def setUp(self):
|
||||
class JSONSerializer(object):
|
||||
def serialize(self, data):
|
||||
def serialize(self, data, action='default'):
|
||||
return 'pew_json'
|
||||
|
||||
class XMLSerializer(object):
|
||||
def serialize(self, data):
|
||||
def serialize(self, data, action='default'):
|
||||
return 'pew_xml'
|
||||
|
||||
self.serializers = {
|
||||
@ -211,11 +223,11 @@ class ResponseSerializerTest(test.TestCase):
|
||||
class RequestDeserializerTest(test.TestCase):
|
||||
def setUp(self):
|
||||
class JSONDeserializer(object):
|
||||
def deserialize(self, data):
|
||||
def deserialize(self, data, action='default'):
|
||||
return 'pew_json'
|
||||
|
||||
class XMLDeserializer(object):
|
||||
def deserialize(self, data):
|
||||
def deserialize(self, data, action='default'):
|
||||
return 'pew_xml'
|
||||
|
||||
self.deserializers = {
|
||||
|
@ -133,11 +133,11 @@ class HostFilterTestCase(test.TestCase):
|
||||
raw = ['or',
|
||||
['and',
|
||||
['<', '$compute.host_memory_free', 30],
|
||||
['<', '$compute.disk_available', 300]
|
||||
['<', '$compute.disk_available', 300],
|
||||
],
|
||||
['and',
|
||||
['>', '$compute.host_memory_free', 70],
|
||||
['>', '$compute.disk_available', 700]
|
||||
['>', '$compute.disk_available', 700],
|
||||
]
|
||||
]
|
||||
cooked = json.dumps(raw)
|
||||
@ -183,12 +183,12 @@ class HostFilterTestCase(test.TestCase):
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
['not', True, False, True, False]
|
||||
['not', True, False, True, False],
|
||||
)))
|
||||
|
||||
try:
|
||||
hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
'not', True, False, True, False
|
||||
'not', True, False, True, False,
|
||||
))
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
|
@ -44,7 +44,7 @@ class WeightedSumTestCase(test.TestCase):
|
||||
hosts = [
|
||||
FakeHost(1, 512 * MB, 100),
|
||||
FakeHost(2, 256 * MB, 400),
|
||||
FakeHost(3, 512 * MB, 100)
|
||||
FakeHost(3, 512 * MB, 100),
|
||||
]
|
||||
|
||||
weighted_fns = [
|
||||
@ -96,7 +96,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
|
||||
|
||||
def test_noop_cost_fn(self):
|
||||
FLAGS.least_cost_scheduler_cost_functions = [
|
||||
'nova.scheduler.least_cost.noop_cost_fn'
|
||||
'nova.scheduler.least_cost.noop_cost_fn',
|
||||
]
|
||||
FLAGS.noop_cost_fn_weight = 1
|
||||
|
||||
@ -110,7 +110,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
|
||||
|
||||
def test_cost_fn_weights(self):
|
||||
FLAGS.least_cost_scheduler_cost_functions = [
|
||||
'nova.scheduler.least_cost.noop_cost_fn'
|
||||
'nova.scheduler.least_cost.noop_cost_fn',
|
||||
]
|
||||
FLAGS.noop_cost_fn_weight = 2
|
||||
|
||||
@ -124,7 +124,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
|
||||
|
||||
def test_fill_first_cost_fn(self):
|
||||
FLAGS.least_cost_scheduler_cost_functions = [
|
||||
'nova.scheduler.least_cost.fill_first_cost_fn'
|
||||
'nova.scheduler.least_cost.fill_first_cost_fn',
|
||||
]
|
||||
FLAGS.fill_first_cost_fn_weight = 1
|
||||
|
||||
|
@ -1110,10 +1110,4 @@ class CallZoneMethodTest(test.TestCase):
|
||||
def test_call_zone_method_generates_exception(self):
|
||||
context = {}
|
||||
method = 'raises_exception'
|
||||
results = api.call_zone_method(context, method)
|
||||
|
||||
# FIXME(sirp): for now the _error_trap code is catching errors and
|
||||
# converting them to a ("ERROR", "string") tuples. The code (and this
|
||||
# test) should eventually handle real exceptions.
|
||||
expected = [(1, ('ERROR', 'testing'))]
|
||||
self.assertEqual(expected, results)
|
||||
self.assertRaises(Exception, api.call_zone_method, context, method)
|
||||
|
@ -201,7 +201,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
|
||||
'instance_properties': {},
|
||||
'instance_type': {},
|
||||
'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
|
||||
'blob': "Non-None blob data"
|
||||
'blob': "Non-None blob data",
|
||||
}
|
||||
|
||||
result = sched.schedule_run_instance(None, 1, request_spec)
|
||||
|
@ -457,6 +457,12 @@ class CloudTestCase(test.TestCase):
|
||||
self.cloud.delete_key_pair(self.context, 'test')
|
||||
|
||||
def test_run_instances(self):
|
||||
# stub out the rpc call
|
||||
def stub_cast(*args, **kwargs):
|
||||
pass
|
||||
|
||||
self.stubs.Set(rpc, 'cast', stub_cast)
|
||||
|
||||
kwargs = {'image_id': FLAGS.default_image,
|
||||
'instance_type': FLAGS.default_instance_type,
|
||||
'max_count': 1}
|
||||
@ -466,7 +472,7 @@ class CloudTestCase(test.TestCase):
|
||||
self.assertEqual(instance['imageId'], 'ami-00000001')
|
||||
self.assertEqual(instance['displayName'], 'Server 1')
|
||||
self.assertEqual(instance['instanceId'], 'i-00000001')
|
||||
self.assertEqual(instance['instanceState']['name'], 'networking')
|
||||
self.assertEqual(instance['instanceState']['name'], 'scheduling')
|
||||
self.assertEqual(instance['instanceType'], 'm1.small')
|
||||
|
||||
def test_run_instances_image_state_none(self):
|
||||
|
@ -16,7 +16,11 @@
|
||||
Tests for Crypto module.
|
||||
"""
|
||||
|
||||
import mox
|
||||
import stubout
|
||||
|
||||
from nova import crypto
|
||||
from nova import db
|
||||
from nova import test
|
||||
|
||||
|
||||
@ -46,3 +50,82 @@ class SymmetricKeyTestCase(test.TestCase):
|
||||
plain = decrypt(cipher_text)
|
||||
|
||||
self.assertEquals(plain_text, plain)
|
||||
|
||||
|
||||
class RevokeCertsTest(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(RevokeCertsTest, self).setUp()
|
||||
self.stubs = stubout.StubOutForTesting()
|
||||
|
||||
def tearDown(self):
|
||||
self.stubs.UnsetAll()
|
||||
super(RevokeCertsTest, self).tearDown()
|
||||
|
||||
def test_revoke_certs_by_user_and_project(self):
|
||||
user_id = 'test_user'
|
||||
project_id = 2
|
||||
file_name = 'test_file'
|
||||
|
||||
def mock_certificate_get_all_by_user_and_project(context,
|
||||
user_id,
|
||||
project_id):
|
||||
|
||||
return [{"user_id": user_id, "project_id": project_id,
|
||||
"file_name": file_name}]
|
||||
|
||||
self.stubs.Set(db, 'certificate_get_all_by_user_and_project',
|
||||
mock_certificate_get_all_by_user_and_project)
|
||||
|
||||
self.mox.StubOutWithMock(crypto, 'revoke_cert')
|
||||
crypto.revoke_cert(project_id, file_name)
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
crypto.revoke_certs_by_user_and_project(user_id, project_id)
|
||||
|
||||
self.mox.VerifyAll()
|
||||
|
||||
def test_revoke_certs_by_user(self):
|
||||
user_id = 'test_user'
|
||||
project_id = 2
|
||||
file_name = 'test_file'
|
||||
|
||||
def mock_certificate_get_all_by_user(context, user_id):
|
||||
|
||||
return [{"user_id": user_id, "project_id": project_id,
|
||||
"file_name": file_name}]
|
||||
|
||||
self.stubs.Set(db, 'certificate_get_all_by_user',
|
||||
mock_certificate_get_all_by_user)
|
||||
|
||||
self.mox.StubOutWithMock(crypto, 'revoke_cert')
|
||||
crypto.revoke_cert(project_id, mox.IgnoreArg())
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
crypto.revoke_certs_by_user(user_id)
|
||||
|
||||
self.mox.VerifyAll()
|
||||
|
||||
def test_revoke_certs_by_project(self):
|
||||
user_id = 'test_user'
|
||||
project_id = 2
|
||||
file_name = 'test_file'
|
||||
|
||||
def mock_certificate_get_all_by_project(context, project_id):
|
||||
|
||||
return [{"user_id": user_id, "project_id": project_id,
|
||||
"file_name": file_name}]
|
||||
|
||||
self.stubs.Set(db, 'certificate_get_all_by_project',
|
||||
mock_certificate_get_all_by_project)
|
||||
|
||||
self.mox.StubOutWithMock(crypto, 'revoke_cert')
|
||||
crypto.revoke_cert(project_id, mox.IgnoreArg())
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
crypto.revoke_certs_by_project(project_id)
|
||||
|
||||
self.mox.VerifyAll()
|
||||
|
@ -331,7 +331,7 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
|
||||
def check_vm_params_for_linux(self):
|
||||
self.assertEquals(self.vm['platform']['nx'], 'false')
|
||||
self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
|
||||
self.assertEquals(self.vm['PV_args'], '')
|
||||
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
|
||||
|
||||
# check that these are not set
|
||||
|
@ -157,7 +157,6 @@ class VMHelper(HelperBase):
|
||||
rec['PV_ramdisk'] = ramdisk
|
||||
else:
|
||||
# 2. Use kernel within the image
|
||||
rec['PV_args'] = 'clocksource=jiffies'
|
||||
rec['PV_bootloader'] = 'pygrub'
|
||||
else:
|
||||
# 3. Using hardware virtualization
|
||||
@ -329,12 +328,6 @@ class VMHelper(HelperBase):
|
||||
'snap': template_vdi_uuid}
|
||||
return template_vm_ref, template_vdi_uuids
|
||||
|
||||
@classmethod
|
||||
def get_sr(cls, session, sr_label='slices'):
|
||||
"""Finds the SR named by the given name label and returns
|
||||
the UUID"""
|
||||
return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
|
||||
|
||||
@classmethod
|
||||
def get_sr_path(cls, session):
|
||||
"""Return the path to our storage repository
|
||||
@ -790,8 +783,7 @@ class VMHelper(HelperBase):
|
||||
@classmethod
|
||||
def scan_default_sr(cls, session):
|
||||
"""Looks for the system default SR and triggers a re-scan"""
|
||||
#FIXME(sirp/mdietz): refactor scan_default_sr in there
|
||||
sr_ref = cls.get_sr(session)
|
||||
sr_ref = find_sr(session)
|
||||
session.call_xenapi('SR.scan', sr_ref)
|
||||
|
||||
|
||||
|
@ -160,9 +160,24 @@ class VMOps(object):
|
||||
# Create the VM ref and attach the first disk
|
||||
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
|
||||
vdis[0]['vdi_uuid'])
|
||||
use_pv_kernel = VMHelper.determine_is_pv(self._session,
|
||||
instance.id, first_vdi_ref, disk_image_type,
|
||||
instance.os_type)
|
||||
|
||||
vm_mode = instance.vm_mode and instance.vm_mode.lower()
|
||||
if vm_mode == 'pv':
|
||||
use_pv_kernel = True
|
||||
elif vm_mode in ('hv', 'hvm'):
|
||||
use_pv_kernel = False
|
||||
vm_mode = 'hvm' # Normalize
|
||||
else:
|
||||
use_pv_kernel = VMHelper.determine_is_pv(self._session,
|
||||
instance.id, first_vdi_ref, disk_image_type,
|
||||
instance.os_type)
|
||||
vm_mode = use_pv_kernel and 'pv' or 'hvm'
|
||||
|
||||
if instance.vm_mode != vm_mode:
|
||||
# Update database with normalized (or determined) value
|
||||
db.instance_update(context.get_admin_context(),
|
||||
instance['id'], {'vm_mode': vm_mode})
|
||||
|
||||
vm_ref = VMHelper.create_vm(self._session, instance,
|
||||
kernel, ramdisk, use_pv_kernel)
|
||||
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
|
||||
|
@ -44,7 +44,7 @@ def move_vhds_into_sr(session, args):
|
||||
new_cow_uuid = params['new_cow_uuid']
|
||||
|
||||
sr_path = params['sr_path']
|
||||
sr_temp_path = "%s/images/" % sr_path
|
||||
sr_temp_path = "%s/tmp/" % sr_path
|
||||
|
||||
# Discover the copied VHDs locally, and then set up paths to copy
|
||||
# them to under the SR
|
||||
|
92
run_tests.py
@ -56,9 +56,11 @@ To run a single test module:
|
||||
"""
|
||||
|
||||
import gettext
|
||||
import heapq
|
||||
import os
|
||||
import unittest
|
||||
import sys
|
||||
import time
|
||||
|
||||
gettext.install('nova', unicode=1)
|
||||
|
||||
@ -183,9 +185,21 @@ class _NullColorizer(object):
|
||||
self.stream.write(text)
|
||||
|
||||
|
||||
def get_elapsed_time_color(elapsed_time):
|
||||
if elapsed_time > 1.0:
|
||||
return 'red'
|
||||
elif elapsed_time > 0.25:
|
||||
return 'yellow'
|
||||
else:
|
||||
return 'green'
|
||||
|
||||
|
||||
class NovaTestResult(result.TextTestResult):
|
||||
def __init__(self, *args, **kw):
|
||||
self.show_elapsed = kw.pop('show_elapsed')
|
||||
result.TextTestResult.__init__(self, *args, **kw)
|
||||
self.num_slow_tests = 5
|
||||
self.slow_tests = [] # this is a fixed-sized heap
|
||||
self._last_case = None
|
||||
self.colorizer = None
|
||||
# NOTE(vish): reset stdout for the terminal check
|
||||
@ -200,25 +214,40 @@ class NovaTestResult(result.TextTestResult):
|
||||
def getDescription(self, test):
|
||||
return str(test)
|
||||
|
||||
def _handleElapsedTime(self, test):
|
||||
self.elapsed_time = time.time() - self.start_time
|
||||
item = (self.elapsed_time, test)
|
||||
# Record only the n-slowest tests using heap
|
||||
if len(self.slow_tests) >= self.num_slow_tests:
|
||||
heapq.heappushpop(self.slow_tests, item)
|
||||
else:
|
||||
heapq.heappush(self.slow_tests, item)
|
||||
|
||||
def _writeElapsedTime(self, test):
|
||||
color = get_elapsed_time_color(self.elapsed_time)
|
||||
self.colorizer.write(" %.2f" % self.elapsed_time, color)
|
||||
|
||||
def _writeResult(self, test, long_result, color, short_result, success):
|
||||
if self.showAll:
|
||||
self.colorizer.write(long_result, color)
|
||||
if self.show_elapsed and success:
|
||||
self._writeElapsedTime(test)
|
||||
self.stream.writeln()
|
||||
elif self.dots:
|
||||
self.stream.write(short_result)
|
||||
self.stream.flush()
|
||||
|
||||
# NOTE(vish): copied from unittest with edit to add color
|
||||
def addSuccess(self, test):
|
||||
unittest.TestResult.addSuccess(self, test)
|
||||
if self.showAll:
|
||||
self.colorizer.write("OK", 'green')
|
||||
self.stream.writeln()
|
||||
elif self.dots:
|
||||
self.stream.write('.')
|
||||
self.stream.flush()
|
||||
self._handleElapsedTime(test)
|
||||
self._writeResult(test, 'OK', 'green', '.', True)
|
||||
|
||||
# NOTE(vish): copied from unittest with edit to add color
|
||||
def addFailure(self, test, err):
|
||||
unittest.TestResult.addFailure(self, test, err)
|
||||
if self.showAll:
|
||||
self.colorizer.write("FAIL", 'red')
|
||||
self.stream.writeln()
|
||||
elif self.dots:
|
||||
self.stream.write('F')
|
||||
self.stream.flush()
|
||||
self._handleElapsedTime(test)
|
||||
self._writeResult(test, 'FAIL', 'red', 'F', False)
|
||||
|
||||
# NOTE(vish): copied from nose with edit to add color
|
||||
def addError(self, test, err):
|
||||
@ -226,6 +255,7 @@ class NovaTestResult(result.TextTestResult):
|
||||
errorClasses. If the exception is a registered class, the
|
||||
error will be added to the list for that class, not errors.
|
||||
"""
|
||||
self._handleElapsedTime(test)
|
||||
stream = getattr(self, 'stream', None)
|
||||
ec, ev, tb = err
|
||||
try:
|
||||
@ -252,14 +282,11 @@ class NovaTestResult(result.TextTestResult):
|
||||
self.errors.append((test, exc_info))
|
||||
test.passed = False
|
||||
if stream is not None:
|
||||
if self.showAll:
|
||||
self.colorizer.write("ERROR", 'red')
|
||||
self.stream.writeln()
|
||||
elif self.dots:
|
||||
stream.write('E')
|
||||
self._writeResult(test, 'ERROR', 'red', 'E', False)
|
||||
|
||||
def startTest(self, test):
|
||||
unittest.TestResult.startTest(self, test)
|
||||
self.start_time = time.time()
|
||||
current_case = test.test.__class__.__name__
|
||||
|
||||
if self.showAll:
|
||||
@ -273,21 +300,47 @@ class NovaTestResult(result.TextTestResult):
|
||||
|
||||
|
||||
class NovaTestRunner(core.TextTestRunner):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.show_elapsed = kwargs.pop('show_elapsed')
|
||||
core.TextTestRunner.__init__(self, *args, **kwargs)
|
||||
|
||||
def _makeResult(self):
|
||||
return NovaTestResult(self.stream,
|
||||
self.descriptions,
|
||||
self.verbosity,
|
||||
self.config)
|
||||
self.config,
|
||||
show_elapsed=self.show_elapsed)
|
||||
|
||||
def _writeSlowTests(self, result_):
|
||||
# Pare out 'fast' tests
|
||||
slow_tests = [item for item in result_.slow_tests
|
||||
if get_elapsed_time_color(item[0]) != 'green']
|
||||
if slow_tests:
|
||||
slow_total_time = sum(item[0] for item in slow_tests)
|
||||
self.stream.writeln("Slowest %i tests took %.2f secs:"
|
||||
% (len(slow_tests), slow_total_time))
|
||||
for elapsed_time, test in sorted(slow_tests, reverse=True):
|
||||
time_str = "%.2f" % elapsed_time
|
||||
self.stream.writeln(" %s %s" % (time_str.ljust(10), test))
|
||||
|
||||
def run(self, test):
|
||||
result_ = core.TextTestRunner.run(self, test)
|
||||
if self.show_elapsed:
|
||||
self._writeSlowTests(result_)
|
||||
return result_
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
logging.setup()
|
||||
# If any argument looks like a test name but doesn't have "nova.tests" in
|
||||
# front of it, automatically add that so we don't have to type as much
|
||||
show_elapsed = True
|
||||
argv = []
|
||||
for x in sys.argv:
|
||||
if x.startswith('test_'):
|
||||
argv.append('nova.tests.%s' % x)
|
||||
elif x.startswith('--hide-elapsed'):
|
||||
show_elapsed = False
|
||||
else:
|
||||
argv.append(x)
|
||||
|
||||
@ -300,5 +353,6 @@ if __name__ == '__main__':
|
||||
|
||||
runner = NovaTestRunner(stream=c.stream,
|
||||
verbosity=c.verbosity,
|
||||
config=c)
|
||||
config=c,
|
||||
show_elapsed=show_elapsed)
|
||||
sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
|
||||
|
10
run_tests.sh
@ -10,6 +10,7 @@ function usage {
|
||||
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
|
||||
echo " -p, --pep8 Just run pep8"
|
||||
echo " -h, --help Print this usage message"
|
||||
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
|
||||
echo ""
|
||||
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
|
||||
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
|
||||
@ -24,6 +25,7 @@ function process_option {
|
||||
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
|
||||
-f|--force) let force=1;;
|
||||
-p|--pep8) let just_pep8=1;;
|
||||
-*) noseopts="$noseopts $1";;
|
||||
*) noseargs="$noseargs $1"
|
||||
esac
|
||||
}
|
||||
@ -34,6 +36,7 @@ always_venv=0
|
||||
never_venv=0
|
||||
force=0
|
||||
noseargs=
|
||||
noseopts=
|
||||
wrapper=""
|
||||
just_pep8=0
|
||||
|
||||
@ -72,7 +75,7 @@ function run_pep8 {
|
||||
--exclude=vcsversion.py ${srcfiles}
|
||||
}
|
||||
|
||||
NOSETESTS="python run_tests.py $noseargs"
|
||||
NOSETESTS="python run_tests.py $noseopts $noseargs"
|
||||
|
||||
if [ $never_venv -eq 0 ]
|
||||
then
|
||||
@ -107,7 +110,10 @@ fi
|
||||
|
||||
run_tests || exit
|
||||
|
||||
# Also run pep8 if no options were provided.
|
||||
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
|
||||
# not when we're running tests individually. To handle this, we need to
|
||||
# distinguish between options (noseopts), which begin with a '-', and
|
||||
# arguments (noseargs).
|
||||
if [ -z "$noseargs" ]; then
|
||||
run_pep8
|
||||
fi
|
||||
|
@ -10,7 +10,7 @@ boto==1.9b
|
||||
carrot==0.10.5
|
||||
eventlet==0.9.12
|
||||
lockfile==0.8
|
||||
python-novaclient==2.3
|
||||
python-novaclient==2.5.3
|
||||
python-daemon==1.5.5
|
||||
python-gflags==1.3
|
||||
redis==2.0.0
|
||||
|