merge with trey
@ -1,283 +0,0 @@
|
||||
source/api/nova..adminclient.rst
|
||||
source/api/nova..api.direct.rst
|
||||
source/api/nova..api.ec2.admin.rst
|
||||
source/api/nova..api.ec2.apirequest.rst
|
||||
source/api/nova..api.ec2.cloud.rst
|
||||
source/api/nova..api.ec2.metadatarequesthandler.rst
|
||||
source/api/nova..api.openstack.auth.rst
|
||||
source/api/nova..api.openstack.backup_schedules.rst
|
||||
source/api/nova..api.openstack.common.rst
|
||||
source/api/nova..api.openstack.consoles.rst
|
||||
source/api/nova..api.openstack.faults.rst
|
||||
source/api/nova..api.openstack.flavors.rst
|
||||
source/api/nova..api.openstack.images.rst
|
||||
source/api/nova..api.openstack.servers.rst
|
||||
source/api/nova..api.openstack.shared_ip_groups.rst
|
||||
source/api/nova..api.openstack.zones.rst
|
||||
source/api/nova..auth.dbdriver.rst
|
||||
source/api/nova..auth.fakeldap.rst
|
||||
source/api/nova..auth.ldapdriver.rst
|
||||
source/api/nova..auth.manager.rst
|
||||
source/api/nova..auth.signer.rst
|
||||
source/api/nova..cloudpipe.pipelib.rst
|
||||
source/api/nova..compute.api.rst
|
||||
source/api/nova..compute.instance_types.rst
|
||||
source/api/nova..compute.manager.rst
|
||||
source/api/nova..compute.monitor.rst
|
||||
source/api/nova..compute.power_state.rst
|
||||
source/api/nova..console.api.rst
|
||||
source/api/nova..console.fake.rst
|
||||
source/api/nova..console.manager.rst
|
||||
source/api/nova..console.xvp.rst
|
||||
source/api/nova..context.rst
|
||||
source/api/nova..crypto.rst
|
||||
source/api/nova..db.api.rst
|
||||
source/api/nova..db.base.rst
|
||||
source/api/nova..db.migration.rst
|
||||
source/api/nova..db.sqlalchemy.api.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
|
||||
source/api/nova..db.sqlalchemy.migration.rst
|
||||
source/api/nova..db.sqlalchemy.models.rst
|
||||
source/api/nova..db.sqlalchemy.session.rst
|
||||
source/api/nova..exception.rst
|
||||
source/api/nova..fakememcache.rst
|
||||
source/api/nova..fakerabbit.rst
|
||||
source/api/nova..flags.rst
|
||||
source/api/nova..image.glance.rst
|
||||
source/api/nova..image.local.rst
|
||||
source/api/nova..image.s3.rst
|
||||
source/api/nova..image.service.rst
|
||||
source/api/nova..log.rst
|
||||
source/api/nova..manager.rst
|
||||
source/api/nova..network.api.rst
|
||||
source/api/nova..network.linux_net.rst
|
||||
source/api/nova..network.manager.rst
|
||||
source/api/nova..objectstore.bucket.rst
|
||||
source/api/nova..objectstore.handler.rst
|
||||
source/api/nova..objectstore.image.rst
|
||||
source/api/nova..objectstore.stored.rst
|
||||
source/api/nova..quota.rst
|
||||
source/api/nova..rpc.rst
|
||||
source/api/nova..scheduler.chance.rst
|
||||
source/api/nova..scheduler.driver.rst
|
||||
source/api/nova..scheduler.manager.rst
|
||||
source/api/nova..scheduler.simple.rst
|
||||
source/api/nova..scheduler.zone.rst
|
||||
source/api/nova..service.rst
|
||||
source/api/nova..test.rst
|
||||
source/api/nova..tests.api.openstack.fakes.rst
|
||||
source/api/nova..tests.api.openstack.test_adminapi.rst
|
||||
source/api/nova..tests.api.openstack.test_api.rst
|
||||
source/api/nova..tests.api.openstack.test_auth.rst
|
||||
source/api/nova..tests.api.openstack.test_common.rst
|
||||
source/api/nova..tests.api.openstack.test_faults.rst
|
||||
source/api/nova..tests.api.openstack.test_flavors.rst
|
||||
source/api/nova..tests.api.openstack.test_images.rst
|
||||
source/api/nova..tests.api.openstack.test_ratelimiting.rst
|
||||
source/api/nova..tests.api.openstack.test_servers.rst
|
||||
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
|
||||
source/api/nova..tests.api.openstack.test_zones.rst
|
||||
source/api/nova..tests.api.test_wsgi.rst
|
||||
source/api/nova..tests.db.fakes.rst
|
||||
source/api/nova..tests.declare_flags.rst
|
||||
source/api/nova..tests.fake_flags.rst
|
||||
source/api/nova..tests.glance.stubs.rst
|
||||
source/api/nova..tests.hyperv_unittest.rst
|
||||
source/api/nova..tests.objectstore_unittest.rst
|
||||
source/api/nova..tests.real_flags.rst
|
||||
source/api/nova..tests.runtime_flags.rst
|
||||
source/api/nova..tests.test_access.rst
|
||||
source/api/nova..tests.test_api.rst
|
||||
source/api/nova..tests.test_auth.rst
|
||||
source/api/nova..tests.test_cloud.rst
|
||||
source/api/nova..tests.test_compute.rst
|
||||
source/api/nova..tests.test_console.rst
|
||||
source/api/nova..tests.test_direct.rst
|
||||
source/api/nova..tests.test_flags.rst
|
||||
source/api/nova..tests.test_instance_types.rst
|
||||
source/api/nova..tests.test_localization.rst
|
||||
source/api/nova..tests.test_log.rst
|
||||
source/api/nova..tests.test_middleware.rst
|
||||
source/api/nova..tests.test_misc.rst
|
||||
source/api/nova..tests.test_network.rst
|
||||
source/api/nova..tests.test_quota.rst
|
||||
source/api/nova..tests.test_rpc.rst
|
||||
source/api/nova..tests.test_scheduler.rst
|
||||
source/api/nova..tests.test_service.rst
|
||||
source/api/nova..tests.test_test.rst
|
||||
source/api/nova..tests.test_twistd.rst
|
||||
source/api/nova..tests.test_utils.rst
|
||||
source/api/nova..tests.test_virt.rst
|
||||
source/api/nova..tests.test_volume.rst
|
||||
source/api/nova..tests.test_xenapi.rst
|
||||
source/api/nova..tests.xenapi.stubs.rst
|
||||
source/api/nova..twistd.rst
|
||||
source/api/nova..utils.rst
|
||||
source/api/nova..version.rst
|
||||
source/api/nova..virt.connection.rst
|
||||
source/api/nova..virt.disk.rst
|
||||
source/api/nova..virt.fake.rst
|
||||
source/api/nova..virt.hyperv.rst
|
||||
source/api/nova..virt.images.rst
|
||||
source/api/nova..virt.libvirt_conn.rst
|
||||
source/api/nova..virt.xenapi.fake.rst
|
||||
source/api/nova..virt.xenapi.network_utils.rst
|
||||
source/api/nova..virt.xenapi.vm_utils.rst
|
||||
source/api/nova..virt.xenapi.vmops.rst
|
||||
source/api/nova..virt.xenapi.volume_utils.rst
|
||||
source/api/nova..virt.xenapi.volumeops.rst
|
||||
source/api/nova..virt.xenapi_conn.rst
|
||||
source/api/nova..volume.api.rst
|
||||
source/api/nova..volume.driver.rst
|
||||
source/api/nova..volume.manager.rst
|
||||
source/api/nova..volume.san.rst
|
||||
source/api/nova..wsgi.rst
|
||||
source/api/autoindex.rst
|
||||
source/api/nova..adminclient.rst
|
||||
source/api/nova..api.direct.rst
|
||||
source/api/nova..api.ec2.admin.rst
|
||||
source/api/nova..api.ec2.apirequest.rst
|
||||
source/api/nova..api.ec2.cloud.rst
|
||||
source/api/nova..api.ec2.metadatarequesthandler.rst
|
||||
source/api/nova..api.openstack.auth.rst
|
||||
source/api/nova..api.openstack.backup_schedules.rst
|
||||
source/api/nova..api.openstack.common.rst
|
||||
source/api/nova..api.openstack.consoles.rst
|
||||
source/api/nova..api.openstack.faults.rst
|
||||
source/api/nova..api.openstack.flavors.rst
|
||||
source/api/nova..api.openstack.images.rst
|
||||
source/api/nova..api.openstack.servers.rst
|
||||
source/api/nova..api.openstack.shared_ip_groups.rst
|
||||
source/api/nova..api.openstack.zones.rst
|
||||
source/api/nova..auth.dbdriver.rst
|
||||
source/api/nova..auth.fakeldap.rst
|
||||
source/api/nova..auth.ldapdriver.rst
|
||||
source/api/nova..auth.manager.rst
|
||||
source/api/nova..auth.signer.rst
|
||||
source/api/nova..cloudpipe.pipelib.rst
|
||||
source/api/nova..compute.api.rst
|
||||
source/api/nova..compute.instance_types.rst
|
||||
source/api/nova..compute.manager.rst
|
||||
source/api/nova..compute.monitor.rst
|
||||
source/api/nova..compute.power_state.rst
|
||||
source/api/nova..console.api.rst
|
||||
source/api/nova..console.fake.rst
|
||||
source/api/nova..console.manager.rst
|
||||
source/api/nova..console.xvp.rst
|
||||
source/api/nova..context.rst
|
||||
source/api/nova..crypto.rst
|
||||
source/api/nova..db.api.rst
|
||||
source/api/nova..db.base.rst
|
||||
source/api/nova..db.migration.rst
|
||||
source/api/nova..db.sqlalchemy.api.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
|
||||
source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
|
||||
source/api/nova..db.sqlalchemy.migration.rst
|
||||
source/api/nova..db.sqlalchemy.models.rst
|
||||
source/api/nova..db.sqlalchemy.session.rst
|
||||
source/api/nova..exception.rst
|
||||
source/api/nova..fakememcache.rst
|
||||
source/api/nova..fakerabbit.rst
|
||||
source/api/nova..flags.rst
|
||||
source/api/nova..image.glance.rst
|
||||
source/api/nova..image.local.rst
|
||||
source/api/nova..image.s3.rst
|
||||
source/api/nova..image.service.rst
|
||||
source/api/nova..log.rst
|
||||
source/api/nova..manager.rst
|
||||
source/api/nova..network.api.rst
|
||||
source/api/nova..network.linux_net.rst
|
||||
source/api/nova..network.manager.rst
|
||||
source/api/nova..objectstore.bucket.rst
|
||||
source/api/nova..objectstore.handler.rst
|
||||
source/api/nova..objectstore.image.rst
|
||||
source/api/nova..objectstore.stored.rst
|
||||
source/api/nova..quota.rst
|
||||
source/api/nova..rpc.rst
|
||||
source/api/nova..scheduler.chance.rst
|
||||
source/api/nova..scheduler.driver.rst
|
||||
source/api/nova..scheduler.manager.rst
|
||||
source/api/nova..scheduler.simple.rst
|
||||
source/api/nova..scheduler.zone.rst
|
||||
source/api/nova..service.rst
|
||||
source/api/nova..test.rst
|
||||
source/api/nova..tests.api.openstack.fakes.rst
|
||||
source/api/nova..tests.api.openstack.test_adminapi.rst
|
||||
source/api/nova..tests.api.openstack.test_api.rst
|
||||
source/api/nova..tests.api.openstack.test_auth.rst
|
||||
source/api/nova..tests.api.openstack.test_common.rst
|
||||
source/api/nova..tests.api.openstack.test_faults.rst
|
||||
source/api/nova..tests.api.openstack.test_flavors.rst
|
||||
source/api/nova..tests.api.openstack.test_images.rst
|
||||
source/api/nova..tests.api.openstack.test_ratelimiting.rst
|
||||
source/api/nova..tests.api.openstack.test_servers.rst
|
||||
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
|
||||
source/api/nova..tests.api.openstack.test_zones.rst
|
||||
source/api/nova..tests.api.test_wsgi.rst
|
||||
source/api/nova..tests.db.fakes.rst
|
||||
source/api/nova..tests.declare_flags.rst
|
||||
source/api/nova..tests.fake_flags.rst
|
||||
source/api/nova..tests.glance.stubs.rst
|
||||
source/api/nova..tests.hyperv_unittest.rst
|
||||
source/api/nova..tests.objectstore_unittest.rst
|
||||
source/api/nova..tests.real_flags.rst
|
||||
source/api/nova..tests.runtime_flags.rst
|
||||
source/api/nova..tests.test_access.rst
|
||||
source/api/nova..tests.test_api.rst
|
||||
source/api/nova..tests.test_auth.rst
|
||||
source/api/nova..tests.test_cloud.rst
|
||||
source/api/nova..tests.test_compute.rst
|
||||
source/api/nova..tests.test_console.rst
|
||||
source/api/nova..tests.test_direct.rst
|
||||
source/api/nova..tests.test_flags.rst
|
||||
source/api/nova..tests.test_instance_types.rst
|
||||
source/api/nova..tests.test_localization.rst
|
||||
source/api/nova..tests.test_log.rst
|
||||
source/api/nova..tests.test_middleware.rst
|
||||
source/api/nova..tests.test_misc.rst
|
||||
source/api/nova..tests.test_network.rst
|
||||
source/api/nova..tests.test_quota.rst
|
||||
source/api/nova..tests.test_rpc.rst
|
||||
source/api/nova..tests.test_scheduler.rst
|
||||
source/api/nova..tests.test_service.rst
|
||||
source/api/nova..tests.test_test.rst
|
||||
source/api/nova..tests.test_twistd.rst
|
||||
source/api/nova..tests.test_utils.rst
|
||||
source/api/nova..tests.test_virt.rst
|
||||
source/api/nova..tests.test_volume.rst
|
||||
source/api/nova..tests.test_xenapi.rst
|
||||
source/api/nova..tests.xenapi.stubs.rst
|
||||
source/api/nova..twistd.rst
|
||||
source/api/nova..utils.rst
|
||||
source/api/nova..version.rst
|
||||
source/api/nova..virt.connection.rst
|
||||
source/api/nova..virt.disk.rst
|
||||
source/api/nova..virt.fake.rst
|
||||
source/api/nova..virt.hyperv.rst
|
||||
source/api/nova..virt.images.rst
|
||||
source/api/nova..virt.libvirt_conn.rst
|
||||
source/api/nova..virt.xenapi.fake.rst
|
||||
source/api/nova..virt.xenapi.network_utils.rst
|
||||
source/api/nova..virt.xenapi.vm_utils.rst
|
||||
source/api/nova..virt.xenapi.vmops.rst
|
||||
source/api/nova..virt.xenapi.volume_utils.rst
|
||||
source/api/nova..virt.xenapi.volumeops.rst
|
||||
source/api/nova..virt.xenapi_conn.rst
|
||||
source/api/nova..volume.api.rst
|
||||
source/api/nova..volume.driver.rst
|
||||
source/api/nova..volume.manager.rst
|
||||
source/api/nova..volume.san.rst
|
||||
source/api/nova..wsgi.rst
|
@ -14,10 +14,16 @@
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
Source for illustrations in doc/source/image_src/zone_distsched_illustrations.odp
|
||||
(OpenOffice Impress format) Illustrations are "exported" to png and then scaled
|
||||
to 400x300 or 640x480 as needed and placed in the doc/source/images directory.
|
||||
|
||||
Distributed Scheduler
|
||||
=====
|
||||
=====================
|
||||
|
||||
The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
|
||||
The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
|
||||
|
||||
.. image:: /images/dating_service.png
|
||||
|
||||
But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understand how to pass instance requests from Zone to Zone.
|
||||
|
||||
@ -25,75 +31,87 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab
|
||||
|
||||
So, how does this all work?
|
||||
|
||||
This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the Zones documentation before reading this.
|
||||
This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the :doc:`devguide/zones` documentation before reading this.
|
||||
|
||||
.. image:: /images/zone_aware_scheduler.png
|
||||
|
||||
Costs & Weights
|
||||
----------
|
||||
When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to putting a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
|
||||
---------------
|
||||
When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
|
||||
|
||||
Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost.
|
||||
|
||||
An example of some other costs might include selecting:
|
||||
* a GPU-based host over a standard CPU
|
||||
* a host with fast ethernet over a 10mbps line
|
||||
* a host that can run Windows instances
|
||||
* a host in the EU vs North America
|
||||
* etc
|
||||
* a GPU-based host over a standard CPU
|
||||
* a host with fast ethernet over a 10mbps line
|
||||
* a host that can run Windows instances
|
||||
* a host in the EU vs North America
|
||||
* etc
|
||||
|
||||
This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly.
|
||||
|
||||
.. image:: /images/costs_weights.png
|
||||
|
||||
nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler
|
||||
-----------
|
||||
------------------------------------------------------
|
||||
As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions.
|
||||
|
||||
Here is how it works:
|
||||
|
||||
1. The compute nodes are filtered and the nodes remaining are weighed.
|
||||
1a. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
|
||||
1b. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
|
||||
2. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
|
||||
3. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
|
||||
4. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
|
||||
1. The compute nodes are filtered and the nodes remaining are weighed.
|
||||
2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
|
||||
3. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
|
||||
4. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
|
||||
5. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
|
||||
6. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
|
||||
|
||||
.. image:: /images/zone_aware_overview.png
|
||||
|
||||
`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used.
|
||||
|
||||
Filtering and Weighing
|
||||
------------
|
||||
----------------------
|
||||
The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible.
|
||||
|
||||
.. image:: /images/filtering.png
|
||||
|
||||
Requesting a new instance
|
||||
------------
|
||||
-------------------------
|
||||
Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.
|
||||
|
||||
`nova.compute.api.create()` performed the following actions:
|
||||
1. it validated all the fields passed into it.
|
||||
2. it created an entry in the `Instance` table for each instance requested
|
||||
3. it put one `run_instance` message in the scheduler queue for each instance requested
|
||||
4. the schedulers picked off the messages and decided which compute node should handle the request.
|
||||
5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
|
||||
6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`s are valid.
|
||||
1. it validated all the fields passed into it.
|
||||
2. it created an entry in the `Instance` table for each instance requested
|
||||
3. it put one `run_instance` message in the scheduler queue for each instance requested
|
||||
4. the schedulers picked off the messages and decided which compute node should handle the request.
|
||||
5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
|
||||
6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_ids` are valid.
|
||||
|
||||
.. image:: /images/nova.compute.api.create.png
|
||||
|
||||
Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.
|
||||
|
||||
The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.
|
||||
|
||||
For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
|
||||
1. it validates all the fields passed into it.
|
||||
2. it creates a single `reservation_id` for all of instances created. This is a UUID.
|
||||
3. it creates a single `run_instance` request in the scheduler queue
|
||||
4. a scheduler picks the message off the queue and works on it.
|
||||
5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
|
||||
6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
|
||||
7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
|
||||
8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
|
||||
9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
|
||||
10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
|
||||
1. it validates all the fields passed into it.
|
||||
2. it creates a single `reservation_id` for all of instances created. This is a UUID.
|
||||
3. it creates a single `run_instance` request in the scheduler queue
|
||||
4. a scheduler picks the message off the queue and works on it.
|
||||
5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
|
||||
6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
|
||||
7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
|
||||
8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
|
||||
9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
|
||||
10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
|
||||
|
||||
.. image:: /images/nova.compute.api.create_all_at_once.png
|
||||
|
||||
The Catch
|
||||
-------------
|
||||
---------
|
||||
This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
|
||||
|
||||
When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
|
||||
When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
|
||||
|
||||
Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back the to parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`
|
||||
|
||||
@ -108,7 +126,7 @@ NOTE: The features described in this section are related to the up-coming 'merge
|
||||
|
||||
The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.
|
||||
|
||||
NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
|
||||
NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
|
||||
|
||||
We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches.
|
||||
|
||||
@ -117,7 +135,7 @@ Finally, we need to give the user a way to get information on each of the instan
|
||||
`python-novaclient` will be extended to support both of these changes.
|
||||
|
||||
Host Filter
|
||||
--------------
|
||||
-----------
|
||||
|
||||
As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To faciliate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
|
||||
|
||||
@ -130,21 +148,22 @@ The filter used is determined by the `--default_host_filter` flag, which points
|
||||
To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be.
|
||||
|
||||
Cost Scheduler Weighing
|
||||
--------------
|
||||
-----------------------
|
||||
Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled.
|
||||
|
||||
Simple Zone Aware Scheduling
|
||||
--------------
|
||||
The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
|
||||
----------------------------
|
||||
The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
|
||||
|
||||
The `--scheduler_driver` flag is how you specify the scheduler class name.
|
||||
|
||||
Flags
|
||||
--------------
|
||||
-----
|
||||
|
||||
All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file:
|
||||
|
||||
::
|
||||
|
||||
--allow_admin_api=true
|
||||
--enable_zone_routing=true
|
||||
--zone_name=zone1
|
||||
@ -162,6 +181,7 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf
|
||||
Some optional flags which are handy for debugging are:
|
||||
|
||||
::
|
||||
|
||||
--connection_type=fake
|
||||
--verbose
|
||||
|
||||
|
@ -21,7 +21,7 @@ A Nova deployment is called a Zone. A Zone allows you to partition your deployme
|
||||
|
||||
The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion.
|
||||
|
||||
Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones.
|
||||
Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones and, in those cases, the grandchild's internal structure would not be known to the grand-parent.
|
||||
|
||||
Zones share nothing. They communicate via the public OpenStack API only. No database, queue, user or project definition is shared between Zones.
|
||||
|
||||
@ -99,7 +99,7 @@ You can get the `child zone api url`, `nova api key` and `username` from the `no
|
||||
export NOVA_URL="http://192.168.2.120:8774/v1.0/"
|
||||
|
||||
|
||||
This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.
|
||||
This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done with this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.
|
||||
|
||||
Getting a list of child Zones
|
||||
-----------------------------
|
||||
|
BIN
doc/source/image_src/zones_distsched_illustrations.odp
Executable file
BIN
doc/source/images/costs_weights.png
Normal file
After Width: | Height: | Size: 35 KiB |
BIN
doc/source/images/dating_service.png
Normal file
After Width: | Height: | Size: 31 KiB |
BIN
doc/source/images/filtering.png
Normal file
After Width: | Height: | Size: 18 KiB |
BIN
doc/source/images/nova.compute.api.create.png
Executable file
After Width: | Height: | Size: 49 KiB |
BIN
doc/source/images/nova.compute.api.create_all_at_once.png
Executable file
After Width: | Height: | Size: 61 KiB |
BIN
doc/source/images/zone_aware_overview.png
Executable file
After Width: | Height: | Size: 55 KiB |
BIN
doc/source/images/zone_aware_scheduler.png
Normal file
After Width: | Height: | Size: 20 KiB |
@ -39,6 +39,7 @@ from nova import flags
|
||||
from nova import ipv6
|
||||
from nova import log as logging
|
||||
from nova import network
|
||||
from nova import rpc
|
||||
from nova import utils
|
||||
from nova import volume
|
||||
from nova.api.ec2 import ec2utils
|
||||
@ -120,8 +121,8 @@ class CloudController(object):
|
||||
result = {}
|
||||
for instance in self.compute_api.get_all(context,
|
||||
project_id=project_id):
|
||||
if instance['fixed_ip']:
|
||||
line = '%s slots=%d' % (instance['fixed_ip']['address'],
|
||||
if instance['fixed_ips']:
|
||||
line = '%s slots=%d' % (instance['fixed_ips'][0]['address'],
|
||||
instance['vcpus'])
|
||||
key = str(instance['key_name'])
|
||||
if key in result:
|
||||
@ -792,15 +793,15 @@ class CloudController(object):
|
||||
'name': instance['state_description']}
|
||||
fixed_addr = None
|
||||
floating_addr = None
|
||||
if instance['fixed_ip']:
|
||||
fixed_addr = instance['fixed_ip']['address']
|
||||
if instance['fixed_ip']['floating_ips']:
|
||||
fixed = instance['fixed_ip']
|
||||
if instance['fixed_ips']:
|
||||
fixed = instance['fixed_ips'][0]
|
||||
fixed_addr = fixed['address']
|
||||
if fixed['floating_ips']:
|
||||
floating_addr = fixed['floating_ips'][0]['address']
|
||||
if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
|
||||
if fixed['network'] and 'use_v6' in kwargs:
|
||||
i['dnsNameV6'] = ipv6.to_global(
|
||||
instance['fixed_ip']['network']['cidr_v6'],
|
||||
instance['fixed_ip']['virtual_interface']['address'],
|
||||
fixed['network']['cidr_v6'],
|
||||
fixed['virtual_interface']['address'],
|
||||
instance['project_id'])
|
||||
|
||||
i['privateDnsName'] = fixed_addr
|
||||
@ -872,8 +873,14 @@ class CloudController(object):
|
||||
|
||||
def allocate_address(self, context, **kwargs):
|
||||
LOG.audit(_("Allocate address"), context=context)
|
||||
public_ip = self.network_api.allocate_floating_ip(context)
|
||||
return {'publicIp': public_ip}
|
||||
try:
|
||||
public_ip = self.network_api.allocate_floating_ip(context)
|
||||
return {'publicIp': public_ip}
|
||||
except rpc.RemoteError as ex:
|
||||
if ex.exc_type == 'NoMoreAddresses':
|
||||
raise exception.NoMoreFloatingIps()
|
||||
else:
|
||||
raise
|
||||
|
||||
def release_address(self, context, public_ip, **kwargs):
|
||||
LOG.audit(_("Release address %s"), public_ip, context=context)
|
||||
|
@ -99,7 +99,7 @@ def create_resource(version='1.0'):
|
||||
|
||||
serializers = {
|
||||
'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
|
||||
metadata=metadata)
|
||||
metadata=metadata),
|
||||
}
|
||||
|
||||
return wsgi.Resource(controller, serializers=serializers)
|
||||
|
@ -37,12 +37,18 @@ class Controller(object):
|
||||
meta_dict[key] = value
|
||||
return dict(metadata=meta_dict)
|
||||
|
||||
def _check_body(self, body):
|
||||
if body == None or body == "":
|
||||
expl = _('No Request Body')
|
||||
raise exc.HTTPBadRequest(explanation=expl)
|
||||
|
||||
def index(self, req, server_id):
|
||||
""" Returns the list of metadata for a given instance """
|
||||
context = req.environ['nova.context']
|
||||
return self._get_metadata(context, server_id)
|
||||
|
||||
def create(self, req, server_id, body):
|
||||
self._check_body(body)
|
||||
context = req.environ['nova.context']
|
||||
metadata = body.get('metadata')
|
||||
try:
|
||||
@ -51,9 +57,10 @@ class Controller(object):
|
||||
metadata)
|
||||
except quota.QuotaError as error:
|
||||
self._handle_quota_error(error)
|
||||
return req.body
|
||||
return body
|
||||
|
||||
def update(self, req, server_id, id, body):
|
||||
self._check_body(body)
|
||||
context = req.environ['nova.context']
|
||||
if not id in body:
|
||||
expl = _('Request body and URI mismatch')
|
||||
@ -68,7 +75,7 @@ class Controller(object):
|
||||
except quota.QuotaError as error:
|
||||
self._handle_quota_error(error)
|
||||
|
||||
return req.body
|
||||
return body
|
||||
|
||||
def show(self, req, server_id, id):
|
||||
""" Return a single metadata item """
|
||||
|
@ -60,7 +60,7 @@ class TextDeserializer(object):
|
||||
|
||||
def deserialize(self, datastring, action='default'):
|
||||
"""Find local deserialization method and parse request body."""
|
||||
action_method = getattr(self, action, self.default)
|
||||
action_method = getattr(self, str(action), self.default)
|
||||
return action_method(datastring)
|
||||
|
||||
def default(self, datastring):
|
||||
@ -189,7 +189,7 @@ class DictSerializer(object):
|
||||
|
||||
def serialize(self, data, action='default'):
|
||||
"""Find local serialization method and encode response body."""
|
||||
action_method = getattr(self, action, self.default)
|
||||
action_method = getattr(self, str(action), self.default)
|
||||
return action_method(data)
|
||||
|
||||
def default(self, data):
|
||||
@ -225,7 +225,7 @@ class XMLDictSerializer(DictSerializer):
|
||||
if not xmlns and self.xmlns:
|
||||
node.setAttribute('xmlns', self.xmlns)
|
||||
|
||||
return node.toprettyxml(indent=' ')
|
||||
return node.toprettyxml(indent=' ', encoding='utf-8')
|
||||
|
||||
def _to_xml_node(self, doc, metadata, nodename, data):
|
||||
"""Recursive method to convert data members to XML nodes."""
|
||||
@ -296,7 +296,7 @@ class ResponseSerializer(object):
|
||||
}
|
||||
self.serializers.update(serializers or {})
|
||||
|
||||
def serialize(self, response_data, content_type):
|
||||
def serialize(self, response_data, content_type, action='default'):
|
||||
"""Serialize a dict into a string and wrap in a wsgi.Request object.
|
||||
|
||||
:param response_data: dict produced by the Controller
|
||||
@ -307,7 +307,7 @@ class ResponseSerializer(object):
|
||||
response.headers['Content-Type'] = content_type
|
||||
|
||||
serializer = self.get_serializer(content_type)
|
||||
response.body = serializer.serialize(response_data)
|
||||
response.body = serializer.serialize(response_data, action)
|
||||
|
||||
return response
|
||||
|
||||
@ -358,7 +358,7 @@ class Resource(wsgi.Application):
|
||||
|
||||
#TODO(bcwaldon): find a more elegant way to pass through non-dict types
|
||||
if type(action_result) is dict:
|
||||
response = self.serializer.serialize(action_result, accept)
|
||||
response = self.serializer.serialize(action_result, accept, action)
|
||||
else:
|
||||
response = action_result
|
||||
|
||||
|
@ -263,7 +263,7 @@ class API(base.Base):
|
||||
'instance_type': instance_type,
|
||||
'filter': filter_class,
|
||||
'blob': zone_blob,
|
||||
'num_instances': num_instances
|
||||
'num_instances': num_instances,
|
||||
}
|
||||
|
||||
rpc.cast(context,
|
||||
|
@ -596,8 +596,10 @@ class ComputeManager(manager.SchedulerDependentManager):
|
||||
|
||||
# reload the updated instance ref
|
||||
# FIXME(mdietz): is there reload functionality?
|
||||
instance_ref = self.db.instance_get(context, instance_id)
|
||||
self.driver.finish_resize(instance_ref, disk_info)
|
||||
instance = self.db.instance_get(context, instance_id)
|
||||
network_info = self.network_api.get_instance_nw_info(context,
|
||||
instance)
|
||||
self.driver.finish_resize(instance, disk_info, network_info)
|
||||
|
||||
self.db.migration_update(context, migration_id,
|
||||
{'status': 'finished', })
|
||||
|
@ -176,7 +176,8 @@ def revoke_certs_by_project(project_id):
|
||||
def revoke_certs_by_user_and_project(user_id, project_id):
|
||||
"""Revoke certs for user in project."""
|
||||
admin = context.get_admin_context()
|
||||
for cert in db.certificate_get_all_by_user(admin, user_id, project_id):
|
||||
for cert in db.certificate_get_all_by_user_and_project(admin,
|
||||
user_id, project_id):
|
||||
revoke_cert(cert['project_id'], cert['file_name'])
|
||||
|
||||
|
||||
|
@ -224,13 +224,13 @@ def certificate_update(context, certificate_id, values):
|
||||
###################
|
||||
|
||||
|
||||
def floating_ip_allocate_address(context, host, project_id):
|
||||
def floating_ip_allocate_address(context, project_id):
|
||||
"""Allocate free floating ip and return the address.
|
||||
|
||||
Raises if one is not available.
|
||||
|
||||
"""
|
||||
return IMPL.floating_ip_allocate_address(context, host, project_id)
|
||||
return IMPL.floating_ip_allocate_address(context, project_id)
|
||||
|
||||
|
||||
def floating_ip_create(context, values):
|
||||
@ -433,9 +433,9 @@ def virtual_interface_get_by_instance(context, instance_id):
|
||||
def virtual_interface_get_by_instance_and_network(context, instance_id,
|
||||
network_id):
|
||||
"""gets all virtual interfaces for instance"""
|
||||
return IMPL.virtual_interfaces_get_by_instance_and_network(context,
|
||||
instance_id,
|
||||
network_id)
|
||||
return IMPL.virtual_interface_get_by_instance_and_network(context,
|
||||
instance_id,
|
||||
network_id)
|
||||
|
||||
|
||||
def virtual_interface_get_by_network(context, network_id):
|
||||
|
@ -433,7 +433,7 @@ def certificate_update(context, certificate_id, values):
|
||||
|
||||
|
||||
@require_context
|
||||
def floating_ip_allocate_address(context, host, project_id):
|
||||
def floating_ip_allocate_address(context, project_id):
|
||||
authorize_project_context(context, project_id)
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
@ -448,7 +448,6 @@ def floating_ip_allocate_address(context, host, project_id):
|
||||
if not floating_ip_ref:
|
||||
raise db.NoMoreAddresses()
|
||||
floating_ip_ref['project_id'] = project_id
|
||||
floating_ip_ref['host'] = host
|
||||
session.add(floating_ip_ref)
|
||||
return floating_ip_ref['address']
|
||||
|
||||
|
@ -48,7 +48,7 @@ virtual_interfaces = Table('virtual_interfaces', meta,
|
||||
String(length=255, convert_unicode=False, assert_unicode=None,
|
||||
unicode_error=None, _warn_on_bytestring=False),
|
||||
unique=True),
|
||||
)
|
||||
mysql_engine='InnoDB')
|
||||
|
||||
|
||||
# bridge_interface column to add to networks table
|
||||
|
@ -184,13 +184,6 @@ class Instance(BASE, NovaBase):
|
||||
def project(self):
|
||||
return auth.manager.AuthManager().get_project(self.project_id)
|
||||
|
||||
#TODO{tr3buchet): i don't like this shim.....
|
||||
# prevents breaking ec2 api
|
||||
# should go away with zones when ec2 api doesn't have compute db access
|
||||
@property
|
||||
def fixed_ip(self):
|
||||
return self.fixed_ips[0] if self.fixed_ips else None
|
||||
|
||||
image_ref = Column(String(255))
|
||||
kernel_id = Column(String(255))
|
||||
ramdisk_id = Column(String(255))
|
||||
|
@ -396,6 +396,10 @@ class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined):
|
||||
message = _("Zero floating ips defined for instance %(instance_id)s.")
|
||||
|
||||
|
||||
class NoMoreFloatingIps(NotFound):
|
||||
message = _("Zero floating ips available.")
|
||||
|
||||
|
||||
class KeypairNotFound(NotFound):
|
||||
message = _("Keypair %(keypair_name)s not found for user %(user_id)s")
|
||||
|
||||
|
@ -270,8 +270,10 @@ DEFINE_list('region_list',
|
||||
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
|
||||
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
|
||||
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
|
||||
DEFINE_integer('glance_port', 9292, 'glance port')
|
||||
DEFINE_string('glance_host', '$my_ip', 'glance host')
|
||||
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
|
||||
DEFINE_list('glance_api_servers',
|
||||
['127.0.0.1:9292'],
|
||||
'list of glance api servers available to nova (host:port)')
|
||||
DEFINE_integer('s3_port', 3333, 's3 port')
|
||||
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
|
||||
DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)')
|
||||
|
@ -22,6 +22,7 @@ import nova
|
||||
from nova import exception
|
||||
from nova import utils
|
||||
from nova import flags
|
||||
from nova.image import glance as glance_image_service
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
@ -48,6 +49,8 @@ def get_default_image_service():
|
||||
return ImageService()
|
||||
|
||||
|
||||
# FIXME(sirp): perhaps this should be moved to nova/images/glance so that we
|
||||
# keep Glance specific code together for the most part
|
||||
def get_glance_client(image_href):
|
||||
"""Get the correct glance client and id for the given image_href.
|
||||
|
||||
@ -62,7 +65,9 @@ def get_glance_client(image_href):
|
||||
"""
|
||||
image_href = image_href or 0
|
||||
if str(image_href).isdigit():
|
||||
glance_client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
|
||||
glance_host, glance_port = \
|
||||
glance_image_service.pick_glance_api_server()
|
||||
glance_client = GlanceClient(glance_host, glance_port)
|
||||
return (glance_client, int(image_href))
|
||||
|
||||
try:
|
||||
|
@ -20,6 +20,7 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import datetime
|
||||
import random
|
||||
|
||||
from glance.common import exception as glance_exception
|
||||
|
||||
@ -39,6 +40,21 @@ FLAGS = flags.FLAGS
|
||||
GlanceClient = utils.import_class('glance.client.Client')
|
||||
|
||||
|
||||
def pick_glance_api_server():
|
||||
"""Return which Glance API server to use for the request
|
||||
|
||||
This method provides a very primitive form of load-balancing suitable for
|
||||
testing and sandbox environments. In production, it would be better to use
|
||||
one IP and route that to a real load-balancer.
|
||||
|
||||
Returns (host, port)
|
||||
"""
|
||||
host_port = random.choice(FLAGS.glance_api_servers)
|
||||
host, port_str = host_port.split(':')
|
||||
port = int(port_str)
|
||||
return host, port
|
||||
|
||||
|
||||
class GlanceImageService(service.BaseImageService):
|
||||
"""Provides storage and retrieval of disk image objects within Glance."""
|
||||
|
||||
@ -51,12 +67,21 @@ class GlanceImageService(service.BaseImageService):
|
||||
GLANCE_ONLY_ATTRS
|
||||
|
||||
def __init__(self, client=None):
|
||||
# FIXME(sirp): can we avoid dependency-injection here by using
|
||||
# stubbing out a fake?
|
||||
if client is None:
|
||||
self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
|
||||
else:
|
||||
self.client = client
|
||||
self._client = client
|
||||
|
||||
def _get_client(self):
|
||||
# NOTE(sirp): we want to load balance each request across glance
|
||||
# servers. Since GlanceImageService is a long-lived object, `client`
|
||||
# is made to choose a new server each time via this property.
|
||||
if self._client is not None:
|
||||
return self._client
|
||||
glance_host, glance_port = pick_glance_api_server()
|
||||
return GlanceClient(glance_host, glance_port)
|
||||
|
||||
def _set_client(self, client):
|
||||
self._client = client
|
||||
|
||||
client = property(_get_client, _set_client)
|
||||
|
||||
def index(self, context, filters=None, marker=None, limit=None):
|
||||
"""Calls out to Glance for a list of images available."""
|
||||
|
@ -197,7 +197,7 @@ class FloatingIP(object):
|
||||
fixed_ip = fixed_ips[0] if fixed_ips else None
|
||||
|
||||
# call to correct network host to associate the floating ip
|
||||
network_api.associate_floating_ip(context,
|
||||
self.network_api.associate_floating_ip(context,
|
||||
floating_ip,
|
||||
fixed_ip,
|
||||
affect_auto_assigned=True)
|
||||
@ -220,10 +220,12 @@ class FloatingIP(object):
|
||||
# disassociate floating ips related to fixed_ip
|
||||
for floating_ip in fixed_ip.floating_ips:
|
||||
address = floating_ip['address']
|
||||
network_api.disassociate_floating_ip(context, address)
|
||||
self.network_api.disassociate_floating_ip(context, address)
|
||||
# deallocate if auto_assigned
|
||||
if floating_ip['auto_assigned']:
|
||||
network_api.release_floating_ip(context, address, True)
|
||||
self.network_api.release_floating_ip(context,
|
||||
address,
|
||||
True)
|
||||
|
||||
# call the next inherited class's deallocate_for_instance()
|
||||
# which is currently the NetworkManager version
|
||||
@ -242,7 +244,6 @@ class FloatingIP(object):
|
||||
'allocate any more addresses'))
|
||||
# TODO(vish): add floating ips through manage command
|
||||
return self.db.floating_ip_allocate_address(context,
|
||||
self.host,
|
||||
project_id)
|
||||
|
||||
def associate_floating_ip(self, context, floating_address, fixed_address):
|
||||
@ -284,6 +285,7 @@ class NetworkManager(manager.SchedulerDependentManager):
|
||||
if not network_driver:
|
||||
network_driver = FLAGS.network_driver
|
||||
self.driver = utils.import_object(network_driver)
|
||||
self.network_api = network_api.API()
|
||||
super(NetworkManager, self).__init__(service_name='network',
|
||||
*args, **kwargs)
|
||||
|
||||
@ -418,7 +420,8 @@ class NetworkManager(manager.SchedulerDependentManager):
|
||||
"enabled": "1"}
|
||||
network_dict = {
|
||||
'bridge': network['bridge'],
|
||||
'id': network['id']}
|
||||
'id': network['id'],
|
||||
'injected': network['injected']}
|
||||
info = {
|
||||
'label': network['label'],
|
||||
'gateway': network['gateway'],
|
||||
|
@ -89,6 +89,7 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
self.assertEqual('value1', res_dict['metadata']['key1'])
|
||||
|
||||
def test_index_no_data(self):
|
||||
@ -99,6 +100,7 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
self.assertEqual(0, len(res_dict['metadata']))
|
||||
|
||||
def test_show(self):
|
||||
@ -109,6 +111,7 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
self.assertEqual('value5', res_dict['key5'])
|
||||
|
||||
def test_show_meta_not_found(self):
|
||||
@ -140,8 +143,19 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
self.assertEqual('value1', res_dict['metadata']['key1'])
|
||||
|
||||
def test_create_empty_body(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
|
||||
return_create_instance_metadata)
|
||||
req = webob.Request.blank('/v1.1/servers/1/meta')
|
||||
req.environ['api.version'] = '1.1'
|
||||
req.method = 'POST'
|
||||
req.headers["content-type"] = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(400, res.status_int)
|
||||
|
||||
def test_update_item(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
|
||||
return_create_instance_metadata)
|
||||
@ -152,9 +166,20 @@ class ServerMetaDataTest(unittest.TestCase):
|
||||
req.headers["content-type"] = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(200, res.status_int)
|
||||
self.assertEqual('application/json', res.headers['Content-Type'])
|
||||
res_dict = json.loads(res.body)
|
||||
self.assertEqual('value1', res_dict['key1'])
|
||||
|
||||
def test_update_item_empty_body(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
|
||||
return_create_instance_metadata)
|
||||
req = webob.Request.blank('/v1.1/servers/1/meta/key1')
|
||||
req.environ['api.version'] = '1.1'
|
||||
req.method = 'PUT'
|
||||
req.headers["content-type"] = "application/json"
|
||||
res = req.get_response(fakes.wsgi_app())
|
||||
self.assertEqual(400, res.status_int)
|
||||
|
||||
def test_update_item_too_many_keys(self):
|
||||
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
|
||||
return_create_instance_metadata)
|
||||
|
@ -89,6 +89,12 @@ class DictSerializerTest(test.TestCase):
|
||||
serializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(serializer.serialize({}, 'update'), 'trousers')
|
||||
|
||||
def test_dispatch_action_None(self):
|
||||
serializer = wsgi.DictSerializer()
|
||||
serializer.create = lambda x: 'pants'
|
||||
serializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(serializer.serialize({}, None), 'trousers')
|
||||
|
||||
|
||||
class XMLDictSerializerTest(test.TestCase):
|
||||
def test_xml(self):
|
||||
@ -123,6 +129,12 @@ class TextDeserializerTest(test.TestCase):
|
||||
deserializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers')
|
||||
|
||||
def test_dispatch_action_None(self):
|
||||
deserializer = wsgi.TextDeserializer()
|
||||
deserializer.create = lambda x: 'pants'
|
||||
deserializer.default = lambda x: 'trousers'
|
||||
self.assertEqual(deserializer.deserialize({}, None), 'trousers')
|
||||
|
||||
|
||||
class JSONDeserializerTest(test.TestCase):
|
||||
def test_json(self):
|
||||
@ -171,11 +183,11 @@ class XMLDeserializerTest(test.TestCase):
|
||||
class ResponseSerializerTest(test.TestCase):
|
||||
def setUp(self):
|
||||
class JSONSerializer(object):
|
||||
def serialize(self, data):
|
||||
def serialize(self, data, action='default'):
|
||||
return 'pew_json'
|
||||
|
||||
class XMLSerializer(object):
|
||||
def serialize(self, data):
|
||||
def serialize(self, data, action='default'):
|
||||
return 'pew_xml'
|
||||
|
||||
self.serializers = {
|
||||
@ -211,11 +223,11 @@ class ResponseSerializerTest(test.TestCase):
|
||||
class RequestDeserializerTest(test.TestCase):
|
||||
def setUp(self):
|
||||
class JSONDeserializer(object):
|
||||
def deserialize(self, data):
|
||||
def deserialize(self, data, action='default'):
|
||||
return 'pew_json'
|
||||
|
||||
class XMLDeserializer(object):
|
||||
def deserialize(self, data):
|
||||
def deserialize(self, data, action='default'):
|
||||
return 'pew_xml'
|
||||
|
||||
self.deserializers = {
|
||||
|
@ -68,16 +68,18 @@ def stub_out_db_network_api(stubs):
|
||||
'dns': '192.168.0.1',
|
||||
'vlan': None,
|
||||
'host': None,
|
||||
'injected': False,
|
||||
'vpn_public_address': '192.168.0.2'}
|
||||
|
||||
fixed_ip_fields = {'id': 0,
|
||||
'network_id': 0,
|
||||
'network': FakeModel(network_fields),
|
||||
'address': '192.168.0.100',
|
||||
'instance': False,
|
||||
'instance_id': 0,
|
||||
'allocated': False,
|
||||
'mac_address_id': 0,
|
||||
'mac_address': None,
|
||||
'virtual_interface_id': 0,
|
||||
'virtual_interface': None,
|
||||
'floating_ips': []}
|
||||
|
||||
flavor_fields = {'id': 0,
|
||||
@ -85,20 +87,20 @@ def stub_out_db_network_api(stubs):
|
||||
|
||||
floating_ip_fields = {'id': 0,
|
||||
'address': '192.168.1.100',
|
||||
'fixed_ip_id': 0,
|
||||
'fixed_ip_id': None,
|
||||
'fixed_ip': None,
|
||||
'project_id': 'fake',
|
||||
'project_id': None,
|
||||
'auto_assigned': False}
|
||||
|
||||
mac_address_fields = {'id': 0,
|
||||
'address': 'DE:AD:BE:EF:00:00',
|
||||
'network_id': 0,
|
||||
'instance_id': 0,
|
||||
'network': FakeModel(network_fields)}
|
||||
virtual_interface_fields = {'id': 0,
|
||||
'address': 'DE:AD:BE:EF:00:00',
|
||||
'network_id': 0,
|
||||
'instance_id': 0,
|
||||
'network': FakeModel(network_fields)}
|
||||
|
||||
fixed_ips = [fixed_ip_fields]
|
||||
floating_ips = [floating_ip_fields]
|
||||
mac_addresses = [mac_address_fields]
|
||||
virtual_interfacees = [virtual_interface_fields]
|
||||
networks = [network_fields]
|
||||
|
||||
def fake_floating_ip_allocate_address(context, project_id):
|
||||
@ -108,7 +110,7 @@ def stub_out_db_network_api(stubs):
|
||||
if not ips:
|
||||
raise db.NoMoreAddresses()
|
||||
ips[0]['project_id'] = project_id
|
||||
return FakeModel(ips[0]['address'])
|
||||
return FakeModel(ips[0])
|
||||
|
||||
def fake_floating_ip_deallocate(context, address):
|
||||
ips = filter(lambda i: i['address'] == address,
|
||||
@ -144,6 +146,9 @@ def stub_out_db_network_api(stubs):
|
||||
pass
|
||||
|
||||
def fake_floating_ip_get_by_address(context, address):
|
||||
if isinstance(address, FakeModel):
|
||||
# NOTE(tr3buchet): yo dawg, i heard you like addresses
|
||||
address = address['address']
|
||||
ips = filter(lambda i: i['address'] == address,
|
||||
floating_ips)
|
||||
if not ips:
|
||||
@ -188,13 +193,13 @@ def stub_out_db_network_api(stubs):
|
||||
if ips:
|
||||
ips[0]['instance_id'] = None
|
||||
ips[0]['instance'] = None
|
||||
ips[0]['mac_address'] = None
|
||||
ips[0]['mac_address_id'] = None
|
||||
ips[0]['virtual_interface'] = None
|
||||
ips[0]['virtual_interface_id'] = None
|
||||
|
||||
def fake_fixed_ip_disassociate_all_by_timeout(context, host, time):
|
||||
return 0
|
||||
|
||||
def fake_fixed_ip_get_all_by_instance(context, instance_id):
|
||||
def fake_fixed_ip_get_by_instance(context, instance_id):
|
||||
ips = filter(lambda i: i['instance_id'] == instance_id,
|
||||
fixed_ips)
|
||||
return [FakeModel(i) for i in ips]
|
||||
@ -220,45 +225,46 @@ def stub_out_db_network_api(stubs):
|
||||
if ips:
|
||||
for key in values:
|
||||
ips[0][key] = values[key]
|
||||
if key == 'mac_address_id':
|
||||
mac = filter(lambda x: x['id'] == values[key],
|
||||
mac_addresses)
|
||||
if not mac:
|
||||
if key == 'virtual_interface_id':
|
||||
vif = filter(lambda x: x['id'] == values[key],
|
||||
virtual_interfacees)
|
||||
if not vif:
|
||||
continue
|
||||
fixed_ip_fields['mac_address'] = FakeModel(mac[0])
|
||||
fixed_ip_fields['virtual_interface'] = FakeModel(vif[0])
|
||||
|
||||
def fake_instance_type_get_by_id(context, id):
|
||||
if flavor_fields['id'] == id:
|
||||
return FakeModel(flavor_fields)
|
||||
|
||||
def fake_mac_address_create(context, values):
|
||||
mac = dict(mac_address_fields)
|
||||
mac['id'] = max([m['id'] for m in mac_addresses] or [-1]) + 1
|
||||
def fake_virtual_interface_create(context, values):
|
||||
vif = dict(virtual_interface_fields)
|
||||
vif['id'] = max([m['id'] for m in virtual_interfacees] or [-1]) + 1
|
||||
for key in values:
|
||||
mac[key] = values[key]
|
||||
return FakeModel(mac)
|
||||
vif[key] = values[key]
|
||||
return FakeModel(vif)
|
||||
|
||||
def fake_mac_address_delete_by_instance(context, instance_id):
|
||||
addresses = [m for m in mac_addresses \
|
||||
def fake_virtual_interface_delete_by_instance(context, instance_id):
|
||||
addresses = [m for m in virtual_interfacees \
|
||||
if m['instance_id'] == instance_id]
|
||||
try:
|
||||
for address in addresses:
|
||||
mac_addresses.remove(address)
|
||||
virtual_interfacees.remove(address)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def fake_mac_address_get_all_by_instance(context, instance_id):
|
||||
return [FakeModel(m) for m in mac_addresses \
|
||||
def fake_virtual_interface_get_by_instance(context, instance_id):
|
||||
return [FakeModel(m) for m in virtual_interfacees \
|
||||
if m['instance_id'] == instance_id]
|
||||
|
||||
def fake_mac_address_get_by_instance_and_network(context, instance_id,
|
||||
network_id):
|
||||
mac = filter(lambda m: m['instance_id'] == instance_id \
|
||||
and m['network_id'] == network_id,
|
||||
mac_addresses)
|
||||
if not mac:
|
||||
def fake_virtual_interface_get_by_instance_and_network(context,
|
||||
instance_id,
|
||||
network_id):
|
||||
vif = filter(lambda m: m['instance_id'] == instance_id and \
|
||||
m['network_id'] == network_id,
|
||||
virtual_interfacees)
|
||||
if not vif:
|
||||
return None
|
||||
return FakeModel(mac[0])
|
||||
return FakeModel(vif[0])
|
||||
|
||||
def fake_network_create_safe(context, values):
|
||||
net = dict(network_fields)
|
||||
@ -315,15 +321,15 @@ def stub_out_db_network_api(stubs):
|
||||
fake_fixed_ip_create,
|
||||
fake_fixed_ip_disassociate,
|
||||
fake_fixed_ip_disassociate_all_by_timeout,
|
||||
fake_fixed_ip_get_all_by_instance,
|
||||
fake_fixed_ip_get_by_instance,
|
||||
fake_fixed_ip_get_by_address,
|
||||
fake_fixed_ip_get_network,
|
||||
fake_fixed_ip_update,
|
||||
fake_instance_type_get_by_id,
|
||||
fake_mac_address_create,
|
||||
fake_mac_address_delete_by_instance,
|
||||
fake_mac_address_get_all_by_instance,
|
||||
fake_mac_address_get_by_instance_and_network,
|
||||
fake_virtual_interface_create,
|
||||
fake_virtual_interface_delete_by_instance,
|
||||
fake_virtual_interface_get_by_instance,
|
||||
fake_virtual_interface_get_by_instance_and_network,
|
||||
fake_network_create_safe,
|
||||
fake_network_get,
|
||||
fake_network_get_all,
|
||||
|
@ -60,10 +60,8 @@ class BaseGlanceTest(unittest.TestCase):
|
||||
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22)
|
||||
|
||||
def setUp(self):
|
||||
# FIXME(sirp): we can probably use stubs library here rather than
|
||||
# dependency injection
|
||||
self.client = StubGlanceClient(None)
|
||||
self.service = glance.GlanceImageService(self.client)
|
||||
self.service = glance.GlanceImageService(client=self.client)
|
||||
self.context = context.RequestContext(None, None)
|
||||
|
||||
def assertDateTimesFilled(self, image_meta):
|
||||
|
@ -43,10 +43,12 @@ class NetworkTestCase(test.TestCase):
|
||||
self.network = utils.import_object(FLAGS.network_manager)
|
||||
db_fakes.stub_out_db_network_api(self.stubs)
|
||||
self.network.db = db
|
||||
self.context = context.RequestContext(project=None, user=self.user)
|
||||
self.network.network_api.db = db
|
||||
self.context = context.RequestContext(project='fake', user=self.user)
|
||||
|
||||
def tearDown(self):
|
||||
super(NetworkTestCase, self).tearDown()
|
||||
self.manager.delete_user(self.user.id)
|
||||
reload(db)
|
||||
|
||||
|
||||
@ -65,7 +67,7 @@ class TestFuncs(object):
|
||||
|
||||
def test_allocate_for_instance(self):
|
||||
instance_id = 0
|
||||
project_id = 0
|
||||
project_id = self.context.project_id
|
||||
type_id = 0
|
||||
self.network.set_network_hosts(self.context)
|
||||
nw = self.network.allocate_for_instance(self.context,
|
||||
@ -100,18 +102,18 @@ class TestFuncs(object):
|
||||
self.network.add_fixed_ip_to_instance(self.context,
|
||||
instance_id=instance_id,
|
||||
network_id=network_id)
|
||||
ips = db.fixed_ip_get_all_by_instance(self.context, instance_id)
|
||||
ips = db.fixed_ip_get_by_instance(self.context, instance_id)
|
||||
for ip in ips:
|
||||
self.assertTrue(ip['allocated'])
|
||||
self.network.deallocate_for_instance(self.context,
|
||||
instance_id=instance_id)
|
||||
ips = db.fixed_ip_get_all_by_instance(self.context, instance_id)
|
||||
ips = db.fixed_ip_get_by_instance(self.context, instance_id)
|
||||
for ip in ips:
|
||||
self.assertFalse(ip['allocated'])
|
||||
|
||||
def test_lease_release_fixed_ip(self):
|
||||
instance_id = 0
|
||||
project_id = 0
|
||||
project_id = self.context.project_id
|
||||
type_id = 0
|
||||
self.network.set_network_hosts(self.context)
|
||||
nw = self.network.allocate_for_instance(self.context,
|
||||
@ -122,21 +124,21 @@ class TestFuncs(object):
|
||||
self.assertTrue(nw[0])
|
||||
network_id = nw[0][0]['id']
|
||||
|
||||
ips = db.fixed_ip_get_all_by_instance(self.context, instance_id)
|
||||
mac = db.mac_address_get_by_instance_and_network(self.context,
|
||||
instance_id,
|
||||
network_id)
|
||||
ips = db.fixed_ip_get_by_instance(self.context, instance_id)
|
||||
vif = db.virtual_interface_get_by_instance_and_network(self.context,
|
||||
instance_id,
|
||||
network_id)
|
||||
self.assertTrue(ips)
|
||||
address = ips[0]['address']
|
||||
|
||||
db.fixed_ip_associate(self.context, address, instance_id)
|
||||
db.fixed_ip_update(self.context, address,
|
||||
{'mac_address_id': mac['id']})
|
||||
{'virtual_interface_id': vif['id']})
|
||||
|
||||
self.network.lease_fixed_ip(self.context, mac['address'], address)
|
||||
self.network.lease_fixed_ip(self.context, vif['address'], address)
|
||||
ip = db.fixed_ip_get_by_address(self.context, address)
|
||||
self.assertTrue(ip['leased'])
|
||||
|
||||
self.network.release_fixed_ip(self.context, mac['address'], address)
|
||||
self.network.release_fixed_ip(self.context, vif['address'], address)
|
||||
ip = db.fixed_ip_get_by_address(self.context, address)
|
||||
self.assertFalse(ip['leased'])
|
||||
|
@ -133,11 +133,11 @@ class HostFilterTestCase(test.TestCase):
|
||||
raw = ['or',
|
||||
['and',
|
||||
['<', '$compute.host_memory_free', 30],
|
||||
['<', '$compute.disk_available', 300]
|
||||
['<', '$compute.disk_available', 300],
|
||||
],
|
||||
['and',
|
||||
['>', '$compute.host_memory_free', 70],
|
||||
['>', '$compute.disk_available', 700]
|
||||
['>', '$compute.disk_available', 700],
|
||||
]
|
||||
]
|
||||
cooked = json.dumps(raw)
|
||||
@ -183,12 +183,12 @@ class HostFilterTestCase(test.TestCase):
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
['not', True, False, True, False]
|
||||
['not', True, False, True, False],
|
||||
)))
|
||||
|
||||
try:
|
||||
hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
'not', True, False, True, False
|
||||
'not', True, False, True, False,
|
||||
))
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
|
@ -44,7 +44,7 @@ class WeightedSumTestCase(test.TestCase):
|
||||
hosts = [
|
||||
FakeHost(1, 512 * MB, 100),
|
||||
FakeHost(2, 256 * MB, 400),
|
||||
FakeHost(3, 512 * MB, 100)
|
||||
FakeHost(3, 512 * MB, 100),
|
||||
]
|
||||
|
||||
weighted_fns = [
|
||||
@ -96,7 +96,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
|
||||
|
||||
def test_noop_cost_fn(self):
|
||||
FLAGS.least_cost_scheduler_cost_functions = [
|
||||
'nova.scheduler.least_cost.noop_cost_fn'
|
||||
'nova.scheduler.least_cost.noop_cost_fn',
|
||||
]
|
||||
FLAGS.noop_cost_fn_weight = 1
|
||||
|
||||
@ -110,7 +110,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
|
||||
|
||||
def test_cost_fn_weights(self):
|
||||
FLAGS.least_cost_scheduler_cost_functions = [
|
||||
'nova.scheduler.least_cost.noop_cost_fn'
|
||||
'nova.scheduler.least_cost.noop_cost_fn',
|
||||
]
|
||||
FLAGS.noop_cost_fn_weight = 2
|
||||
|
||||
@ -124,7 +124,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
|
||||
|
||||
def test_fill_first_cost_fn(self):
|
||||
FLAGS.least_cost_scheduler_cost_functions = [
|
||||
'nova.scheduler.least_cost.fill_first_cost_fn'
|
||||
'nova.scheduler.least_cost.fill_first_cost_fn',
|
||||
]
|
||||
FLAGS.fill_first_cost_fn_weight = 1
|
||||
|
||||
|
@ -197,7 +197,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
|
||||
'instance_properties': {},
|
||||
'instance_type': {},
|
||||
'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
|
||||
'blob': "Non-None blob data"
|
||||
'blob': "Non-None blob data",
|
||||
}
|
||||
|
||||
result = sched.schedule_run_instance(None, 1, request_spec)
|
||||
|
@ -116,6 +116,19 @@ class CloudTestCase(test.TestCase):
|
||||
public_ip=address)
|
||||
db.floating_ip_destroy(self.context, address)
|
||||
|
||||
@test.skip_test("Skipping this pending future merge")
|
||||
def test_allocate_address(self):
|
||||
address = "10.10.10.10"
|
||||
allocate = self.cloud.allocate_address
|
||||
db.floating_ip_create(self.context,
|
||||
{'address': address,
|
||||
'host': self.network.host})
|
||||
self.assertEqual(allocate(self.context)['publicIp'], address)
|
||||
db.floating_ip_destroy(self.context, address)
|
||||
self.assertRaises(exception.NoMoreFloatingIps,
|
||||
allocate,
|
||||
self.context)
|
||||
|
||||
@test.skip_test("Skipping this pending future merge")
|
||||
def test_associate_disassociate_address(self):
|
||||
"""Verifies associate runs cleanly without raising an exception"""
|
||||
|
@ -339,6 +339,7 @@ class ComputeTestCase(test.TestCase):
|
||||
pass
|
||||
|
||||
self.stubs.Set(self.compute.driver, 'finish_resize', fake)
|
||||
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)
|
||||
context = self.context.elevated()
|
||||
instance_id = self._create_instance()
|
||||
self.compute.prep_resize(context, instance_id, 1)
|
||||
|
@ -16,7 +16,11 @@
|
||||
Tests for Crypto module.
|
||||
"""
|
||||
|
||||
import mox
|
||||
import stubout
|
||||
|
||||
from nova import crypto
|
||||
from nova import db
|
||||
from nova import test
|
||||
|
||||
|
||||
@ -46,3 +50,82 @@ class SymmetricKeyTestCase(test.TestCase):
|
||||
plain = decrypt(cipher_text)
|
||||
|
||||
self.assertEquals(plain_text, plain)
|
||||
|
||||
|
||||
class RevokeCertsTest(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(RevokeCertsTest, self).setUp()
|
||||
self.stubs = stubout.StubOutForTesting()
|
||||
|
||||
def tearDown(self):
|
||||
self.stubs.UnsetAll()
|
||||
super(RevokeCertsTest, self).tearDown()
|
||||
|
||||
def test_revoke_certs_by_user_and_project(self):
|
||||
user_id = 'test_user'
|
||||
project_id = 2
|
||||
file_name = 'test_file'
|
||||
|
||||
def mock_certificate_get_all_by_user_and_project(context,
|
||||
user_id,
|
||||
project_id):
|
||||
|
||||
return [{"user_id": user_id, "project_id": project_id,
|
||||
"file_name": file_name}]
|
||||
|
||||
self.stubs.Set(db, 'certificate_get_all_by_user_and_project',
|
||||
mock_certificate_get_all_by_user_and_project)
|
||||
|
||||
self.mox.StubOutWithMock(crypto, 'revoke_cert')
|
||||
crypto.revoke_cert(project_id, file_name)
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
crypto.revoke_certs_by_user_and_project(user_id, project_id)
|
||||
|
||||
self.mox.VerifyAll()
|
||||
|
||||
def test_revoke_certs_by_user(self):
|
||||
user_id = 'test_user'
|
||||
project_id = 2
|
||||
file_name = 'test_file'
|
||||
|
||||
def mock_certificate_get_all_by_user(context, user_id):
|
||||
|
||||
return [{"user_id": user_id, "project_id": project_id,
|
||||
"file_name": file_name}]
|
||||
|
||||
self.stubs.Set(db, 'certificate_get_all_by_user',
|
||||
mock_certificate_get_all_by_user)
|
||||
|
||||
self.mox.StubOutWithMock(crypto, 'revoke_cert')
|
||||
crypto.revoke_cert(project_id, mox.IgnoreArg())
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
crypto.revoke_certs_by_user(user_id)
|
||||
|
||||
self.mox.VerifyAll()
|
||||
|
||||
def test_revoke_certs_by_project(self):
|
||||
user_id = 'test_user'
|
||||
project_id = 2
|
||||
file_name = 'test_file'
|
||||
|
||||
def mock_certificate_get_all_by_project(context, project_id):
|
||||
|
||||
return [{"user_id": user_id, "project_id": project_id,
|
||||
"file_name": file_name}]
|
||||
|
||||
self.stubs.Set(db, 'certificate_get_all_by_project',
|
||||
mock_certificate_get_all_by_project)
|
||||
|
||||
self.mox.StubOutWithMock(crypto, 'revoke_cert')
|
||||
crypto.revoke_cert(project_id, mox.IgnoreArg())
|
||||
|
||||
self.mox.ReplayAll()
|
||||
|
||||
crypto.revoke_certs_by_project(project_id)
|
||||
|
||||
self.mox.VerifyAll()
|
||||
|
@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Tests For Scheduler Host Filter Drivers.
|
||||
Tests For Scheduler Host Filters.
|
||||
"""
|
||||
|
||||
import json
|
||||
@ -31,7 +31,7 @@ class FakeZoneManager:
|
||||
|
||||
|
||||
class HostFilterTestCase(test.TestCase):
|
||||
"""Test case for host filter drivers."""
|
||||
"""Test case for host filters."""
|
||||
|
||||
def _host_caps(self, multiplier):
|
||||
# Returns host capabilities in the following way:
|
||||
@ -57,8 +57,8 @@ class HostFilterTestCase(test.TestCase):
|
||||
'host_name-label': 'xs-%s' % multiplier}
|
||||
|
||||
def setUp(self):
|
||||
self.old_flag = FLAGS.default_host_filter_driver
|
||||
FLAGS.default_host_filter_driver = \
|
||||
self.old_flag = FLAGS.default_host_filter
|
||||
FLAGS.default_host_filter = \
|
||||
'nova.scheduler.host_filter.AllHostsFilter'
|
||||
self.instance_type = dict(name='tiny',
|
||||
memory_mb=50,
|
||||
@ -76,51 +76,52 @@ class HostFilterTestCase(test.TestCase):
|
||||
self.zone_manager.service_states = states
|
||||
|
||||
def tearDown(self):
|
||||
FLAGS.default_host_filter_driver = self.old_flag
|
||||
FLAGS.default_host_filter = self.old_flag
|
||||
|
||||
def test_choose_driver(self):
|
||||
# Test default driver ...
|
||||
driver = host_filter.choose_driver()
|
||||
self.assertEquals(driver._full_name(),
|
||||
def test_choose_filter(self):
|
||||
# Test default filter ...
|
||||
hf = host_filter.choose_host_filter()
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.AllHostsFilter')
|
||||
# Test valid driver ...
|
||||
driver = host_filter.choose_driver(
|
||||
'nova.scheduler.host_filter.FlavorFilter')
|
||||
self.assertEquals(driver._full_name(),
|
||||
'nova.scheduler.host_filter.FlavorFilter')
|
||||
# Test invalid driver ...
|
||||
# Test valid filter ...
|
||||
hf = host_filter.choose_host_filter(
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
self.assertEquals(hf._full_name(),
|
||||
'nova.scheduler.host_filter.InstanceTypeFilter')
|
||||
# Test invalid filter ...
|
||||
try:
|
||||
host_filter.choose_driver('does not exist')
|
||||
self.fail("Should not find driver")
|
||||
except exception.SchedulerHostFilterDriverNotFound:
|
||||
host_filter.choose_host_filter('does not exist')
|
||||
self.fail("Should not find host filter.")
|
||||
except exception.SchedulerHostFilterNotFound:
|
||||
pass
|
||||
|
||||
def test_all_host_driver(self):
|
||||
driver = host_filter.AllHostsFilter()
|
||||
cooked = driver.instance_type_to_filter(self.instance_type)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
def test_all_host_filter(self):
|
||||
hf = host_filter.AllHostsFilter()
|
||||
cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(10, len(hosts))
|
||||
for host, capabilities in hosts:
|
||||
self.assertTrue(host.startswith('host'))
|
||||
|
||||
def test_flavor_driver(self):
|
||||
driver = host_filter.FlavorFilter()
|
||||
def test_instance_type_filter(self):
|
||||
hf = host_filter.InstanceTypeFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = driver.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
|
||||
name)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
self.assertEquals('host05', just_hosts[0])
|
||||
self.assertEquals('host10', just_hosts[5])
|
||||
|
||||
def test_json_driver(self):
|
||||
driver = host_filter.JsonFilter()
|
||||
def test_json_filter(self):
|
||||
hf = host_filter.JsonFilter()
|
||||
# filter all hosts that can support 50 ram and 500 disk
|
||||
name, cooked = driver.instance_type_to_filter(self.instance_type)
|
||||
name, cooked = hf.instance_type_to_filter(self.instance_type)
|
||||
self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.assertEquals(6, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
just_hosts.sort()
|
||||
@ -132,12 +133,16 @@ class HostFilterTestCase(test.TestCase):
|
||||
raw = ['or',
|
||||
['and',
|
||||
['<', '$compute.host_memory_free', 30],
|
||||
['<', '$compute.disk_available', 300]],
|
||||
['<', '$compute.disk_available', 300],
|
||||
],
|
||||
['and',
|
||||
['>', '$compute.host_memory_free', 70],
|
||||
['>', '$compute.disk_available', 700]]]
|
||||
['>', '$compute.disk_available', 700],
|
||||
],
|
||||
]
|
||||
|
||||
cooked = json.dumps(raw)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@ -146,9 +151,10 @@ class HostFilterTestCase(test.TestCase):
|
||||
self.assertEquals('host%02d' % index, host)
|
||||
|
||||
raw = ['not',
|
||||
['=', '$compute.host_memory_free', 30], ]
|
||||
['=', '$compute.host_memory_free', 30],
|
||||
]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(9, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@ -158,7 +164,7 @@ class HostFilterTestCase(test.TestCase):
|
||||
|
||||
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
|
||||
cooked = json.dumps(raw)
|
||||
hosts = driver.filter_hosts(self.zone_manager, cooked)
|
||||
hosts = hf.filter_hosts(self.zone_manager, cooked)
|
||||
|
||||
self.assertEquals(5, len(hosts))
|
||||
just_hosts = [host for host, caps in hosts]
|
||||
@ -170,30 +176,30 @@ class HostFilterTestCase(test.TestCase):
|
||||
raw = ['unknown command', ]
|
||||
cooked = json.dumps(raw)
|
||||
try:
|
||||
driver.filter_hosts(self.zone_manager, cooked)
|
||||
hf.filter_hosts(self.zone_manager, cooked)
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
|
||||
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
['not', True, False, True, False])))
|
||||
|
||||
try:
|
||||
driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
hf.filter_hosts(self.zone_manager, json.dumps(
|
||||
'not', True, False, True, False))
|
||||
self.fail("Should give KeyError")
|
||||
except KeyError, e:
|
||||
pass
|
||||
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['=', '$foo', 100])))
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['=', '$.....', 100])))
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['>', ['and', ['or', ['not', ['<', ['>=',
|
||||
['<=', ['in', ]]]]]]]])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$foo', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', '$.....', 100])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(
|
||||
['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
|
||||
|
||||
self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
|
||||
['=', {}, ['>', '$missing....foo']])))
|
||||
self.assertFalse(hf.filter_hosts(self.zone_manager,
|
||||
json.dumps(['=', {}, ['>', '$missing....foo']])))
|
||||
|
@ -73,14 +73,14 @@ def _setup_networking(instance_id, ip='1.2.3.4'):
|
||||
network_ref = db.project_get_networks(ctxt,
|
||||
'fake',
|
||||
associate=True)[0]
|
||||
mac_address = {'address': '56:12:12:12:12:12',
|
||||
'network_id': network_ref['id'],
|
||||
'instance_id': instance_id}
|
||||
mac_ref = db.mac_address_create(ctxt, mac_address)
|
||||
vif = {'address': '56:12:12:12:12:12',
|
||||
'network_id': network_ref['id'],
|
||||
'instance_id': instance_id}
|
||||
vif_ref = db.virtual_interface_create(ctxt, vif)
|
||||
|
||||
fixed_ip = {'address': ip,
|
||||
'network_id': network_ref['id'],
|
||||
'mac_address_id': mac_ref['id']}
|
||||
'virtual_interface_id': vif_ref['id']}
|
||||
db.fixed_ip_create(ctxt, fixed_ip)
|
||||
db.fixed_ip_update(ctxt, ip, {'allocated': True,
|
||||
'instance_id': instance_id})
|
||||
@ -182,7 +182,6 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
test_instance = {'memory_kb': '1024000',
|
||||
'basepath': '/some/path',
|
||||
'bridge_name': 'br100',
|
||||
'mac_address': '02:12:34:46:56:67',
|
||||
'vcpus': 2,
|
||||
'project_id': 'fake',
|
||||
'bridge': 'br101',
|
||||
@ -296,23 +295,27 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self.assertTrue(params.find('PROJNETV6') > -1)
|
||||
self.assertTrue(params.find('PROJMASKV6') > -1)
|
||||
|
||||
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
|
||||
def test_xml_and_uri_no_ramdisk_no_kernel(self):
|
||||
instance_data = dict(self.test_instance)
|
||||
self._check_xml_and_uri(instance_data,
|
||||
expect_kernel=False, expect_ramdisk=False)
|
||||
|
||||
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
|
||||
def test_xml_and_uri_no_ramdisk(self):
|
||||
instance_data = dict(self.test_instance)
|
||||
instance_data['kernel_id'] = 'aki-deadbeef'
|
||||
self._check_xml_and_uri(instance_data,
|
||||
expect_kernel=True, expect_ramdisk=False)
|
||||
|
||||
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
|
||||
def test_xml_and_uri_no_kernel(self):
|
||||
instance_data = dict(self.test_instance)
|
||||
instance_data['ramdisk_id'] = 'ari-deadbeef'
|
||||
self._check_xml_and_uri(instance_data,
|
||||
expect_kernel=False, expect_ramdisk=False)
|
||||
|
||||
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
|
||||
def test_xml_and_uri(self):
|
||||
instance_data = dict(self.test_instance)
|
||||
instance_data['ramdisk_id'] = 'ari-deadbeef'
|
||||
@ -320,6 +323,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self._check_xml_and_uri(instance_data,
|
||||
expect_kernel=True, expect_ramdisk=True)
|
||||
|
||||
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
|
||||
def test_xml_and_uri_rescue(self):
|
||||
instance_data = dict(self.test_instance)
|
||||
instance_data['ramdisk_id'] = 'ari-deadbeef'
|
||||
@ -327,6 +331,7 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self._check_xml_and_uri(instance_data, expect_kernel=True,
|
||||
expect_ramdisk=True, rescue=True)
|
||||
|
||||
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
|
||||
def test_lxc_container_and_uri(self):
|
||||
instance_data = dict(self.test_instance)
|
||||
self._check_xml_and_container(instance_data)
|
||||
@ -431,13 +436,13 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
network_ref = db.project_get_networks(context.get_admin_context(),
|
||||
self.project.id)[0]
|
||||
|
||||
mac_address = {'address': '56:12:12:12:12:12',
|
||||
'network_id': network_ref['id'],
|
||||
'instance_id': instance_ref['id']}
|
||||
mac_ref = db.mac_address_create(self.context, mac_address)
|
||||
vif = {'address': '56:12:12:12:12:12',
|
||||
'network_id': network_ref['id'],
|
||||
'instance_id': instance_ref['id']}
|
||||
vif_ref = db.virtual_interface_create(self.context, vif)
|
||||
fixed_ip = {'address': self.test_ip,
|
||||
'network_id': network_ref['id'],
|
||||
'mac_address_id': mac_ref['id']}
|
||||
'virtual_interface_id': vif_ref['id']}
|
||||
|
||||
ctxt = context.get_admin_context()
|
||||
fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
|
||||
@ -881,9 +886,9 @@ class IptablesFirewallTestCase(test.TestCase):
|
||||
return db.instance_create(self.context,
|
||||
{'user_id': 'fake',
|
||||
'project_id': 'fake',
|
||||
'mac_address': '56:12:12:12:12:12',
|
||||
'instance_type_id': 1})
|
||||
|
||||
@test.skip_test("skipping libvirt tests depends on get_network_info shim")
|
||||
def test_static_filters(self):
|
||||
instance_ref = self._create_instance_ref()
|
||||
ip = '10.11.12.13'
|
||||
@ -891,14 +896,14 @@ class IptablesFirewallTestCase(test.TestCase):
|
||||
network_ref = db.project_get_networks(self.context,
|
||||
'fake',
|
||||
associate=True)[0]
|
||||
mac_address = {'address': '56:12:12:12:12:12',
|
||||
'network_id': network_ref['id'],
|
||||
'instance_id': instance_ref['id']}
|
||||
mac_ref = db.mac_address_create(self.context, mac_address)
|
||||
vif = {'address': '56:12:12:12:12:12',
|
||||
'network_id': network_ref['id'],
|
||||
'instance_id': instance_ref['id']}
|
||||
vif_ref = db.virtual_interface_create(self.context, vif)
|
||||
|
||||
fixed_ip = {'address': ip,
|
||||
'network_id': network_ref['id'],
|
||||
'mac_address_id': mac_ref['id']}
|
||||
'virtual_interface_id': vif_ref['id']}
|
||||
admin_ctxt = context.get_admin_context()
|
||||
db.fixed_ip_create(admin_ctxt, fixed_ip)
|
||||
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
|
||||
@ -1165,7 +1170,6 @@ class NWFilterTestCase(test.TestCase):
|
||||
return db.instance_create(self.context,
|
||||
{'user_id': 'fake',
|
||||
'project_id': 'fake',
|
||||
'mac_address': '00:A0:C9:14:C8:29',
|
||||
'instance_type_id': 1})
|
||||
|
||||
def _create_instance_type(self, params={}):
|
||||
@ -1260,6 +1264,7 @@ class NWFilterTestCase(test.TestCase):
|
||||
"fake")
|
||||
self.assertEquals(len(result), 3)
|
||||
|
||||
@test.skip_test("skip libvirt test project_get_network no longer exists")
|
||||
def test_unfilter_instance_undefines_nwfilters(self):
|
||||
admin_ctxt = context.get_admin_context()
|
||||
|
||||
|
@ -83,7 +83,6 @@ class XenAPIVolumeTestCase(test.TestCase):
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': 'linux'}
|
||||
|
||||
def _create_volume(self, size='0'):
|
||||
@ -210,10 +209,23 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': 'linux'}
|
||||
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
|
||||
{'broadcast': '192.168.0.255',
|
||||
'dns': ['192.168.0.1'],
|
||||
'gateway': '192.168.0.1',
|
||||
'gateway6': 'dead:beef::1',
|
||||
'ip6s': [{'enabled': '1',
|
||||
'ip': 'dead:beef::dcad:beff:feef:0',
|
||||
'netmask': '64'}],
|
||||
'ips': [{'enabled': '1',
|
||||
'ip': '192.168.0.100',
|
||||
'netmask': '255.255.255.0'}],
|
||||
'label': 'fake',
|
||||
'mac': 'DE:AD:BE:EF:00:00',
|
||||
'rxtx_cap': 3})]
|
||||
instance = db.instance_create(self.context, values)
|
||||
self.conn.spawn(instance, {})
|
||||
self.conn.spawn(instance, network_info)
|
||||
|
||||
gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id)
|
||||
gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id)
|
||||
@ -301,22 +313,22 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
|
||||
if check_injection:
|
||||
xenstore_data = self.vm['xenstore_data']
|
||||
key = 'vm-data/networking/aabbccddeeff'
|
||||
key = 'vm-data/networking/DEADBEEF0000'
|
||||
xenstore_value = xenstore_data[key]
|
||||
tcpip_data = ast.literal_eval(xenstore_value)
|
||||
self.assertEquals(tcpip_data,
|
||||
{'label': 'fake_flat_network',
|
||||
'broadcast': '10.0.0.255',
|
||||
'ips': [{'ip': '10.0.0.3',
|
||||
'netmask':'255.255.255.0',
|
||||
'enabled':'1'}],
|
||||
'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
|
||||
'netmask': '120',
|
||||
'enabled': '1'}],
|
||||
'mac': 'aa:bb:cc:dd:ee:ff',
|
||||
'dns': ['10.0.0.2'],
|
||||
'gateway': '10.0.0.1',
|
||||
'gateway6': 'fe80::a00:1'})
|
||||
{'broadcast': '192.168.0.255',
|
||||
'dns': ['192.168.0.1'],
|
||||
'gateway': '192.168.0.1',
|
||||
'gateway6': 'dead:beef::1',
|
||||
'ip6s': [{'enabled': '1',
|
||||
'ip': 'dead:beef::dcad:beff:feef:0',
|
||||
'netmask': '64'}],
|
||||
'ips': [{'enabled': '1',
|
||||
'ip': '192.168.0.100',
|
||||
'netmask': '255.255.255.0'}],
|
||||
'label': 'fake',
|
||||
'mac': 'DE:AD:BE:EF:00:00'})
|
||||
|
||||
def check_vm_params_for_windows(self):
|
||||
self.assertEquals(self.vm['platform']['nx'], 'true')
|
||||
@ -331,7 +343,7 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
|
||||
def check_vm_params_for_linux(self):
|
||||
self.assertEquals(self.vm['platform']['nx'], 'false')
|
||||
self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
|
||||
self.assertEquals(self.vm['PV_args'], '')
|
||||
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
|
||||
|
||||
# check that these are not set
|
||||
@ -361,11 +373,24 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
'kernel_id': kernel_id,
|
||||
'ramdisk_id': ramdisk_id,
|
||||
'instance_type_id': instance_type_id,
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': os_type}
|
||||
if create_record:
|
||||
instance = db.instance_create(self.context, values)
|
||||
self.conn.spawn(instance, None)
|
||||
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
|
||||
{'broadcast': '192.168.0.255',
|
||||
'dns': ['192.168.0.1'],
|
||||
'gateway': '192.168.0.1',
|
||||
'gateway6': 'dead:beef::1',
|
||||
'ip6s': [{'enabled': '1',
|
||||
'ip': 'dead:beef::dcad:beff:feef:0',
|
||||
'netmask': '64'}],
|
||||
'ips': [{'enabled': '1',
|
||||
'ip': '192.168.0.100',
|
||||
'netmask': '255.255.255.0'}],
|
||||
'label': 'fake',
|
||||
'mac': 'DE:AD:BE:EF:00:00',
|
||||
'rxtx_cap': 3})]
|
||||
self.conn.spawn(instance, network_info)
|
||||
else:
|
||||
instance = db.instance_get(self.context, instance_id)
|
||||
self.create_vm_record(self.conn, os_type, instance_id)
|
||||
@ -447,11 +472,11 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
index = config.index('auto eth0')
|
||||
self.assertEquals(config[index + 1:index + 8], [
|
||||
'iface eth0 inet static',
|
||||
'address 10.0.0.3',
|
||||
'address 192.168.0.100',
|
||||
'netmask 255.255.255.0',
|
||||
'broadcast 10.0.0.255',
|
||||
'gateway 10.0.0.1',
|
||||
'dns-nameservers 10.0.0.2',
|
||||
'broadcast 192.168.0.255',
|
||||
'gateway 192.168.0.1',
|
||||
'dns-nameservers 192.168.0.1',
|
||||
''])
|
||||
self._tee_executed = True
|
||||
return '', ''
|
||||
@ -554,7 +579,7 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
|
||||
self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
|
||||
self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
|
||||
str(4 * 1024))
|
||||
str(3 * 1024))
|
||||
|
||||
def test_rescue(self):
|
||||
self.flags(xenapi_inject_image=False)
|
||||
@ -587,10 +612,23 @@ class XenAPIVMTestCase(test.TestCase):
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': 'linux'}
|
||||
instance = db.instance_create(self.context, values)
|
||||
self.conn.spawn(instance, None)
|
||||
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
|
||||
{'broadcast': '192.168.0.255',
|
||||
'dns': ['192.168.0.1'],
|
||||
'gateway': '192.168.0.1',
|
||||
'gateway6': 'dead:beef::1',
|
||||
'ip6s': [{'enabled': '1',
|
||||
'ip': 'dead:beef::dcad:beff:feef:0',
|
||||
'netmask': '64'}],
|
||||
'ips': [{'enabled': '1',
|
||||
'ip': '192.168.0.100',
|
||||
'netmask': '255.255.255.0'}],
|
||||
'label': 'fake',
|
||||
'mac': 'DE:AD:BE:EF:00:00',
|
||||
'rxtx_cap': 3})]
|
||||
self.conn.spawn(instance, network_info)
|
||||
return instance
|
||||
|
||||
|
||||
@ -662,7 +700,6 @@ class XenAPIMigrateInstance(test.TestCase):
|
||||
'ramdisk_id': None,
|
||||
'local_gb': 5,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
||||
'os_type': 'linux'}
|
||||
|
||||
fake_utils.stub_out_utils_execute(self.stubs)
|
||||
@ -687,7 +724,22 @@ class XenAPIMigrateInstance(test.TestCase):
|
||||
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
|
||||
stubs.stubout_loopingcall_start(self.stubs)
|
||||
conn = xenapi_conn.get_connection(False)
|
||||
conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'))
|
||||
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
|
||||
{'broadcast': '192.168.0.255',
|
||||
'dns': ['192.168.0.1'],
|
||||
'gateway': '192.168.0.1',
|
||||
'gateway6': 'dead:beef::1',
|
||||
'ip6s': [{'enabled': '1',
|
||||
'ip': 'dead:beef::dcad:beff:feef:0',
|
||||
'netmask': '64'}],
|
||||
'ips': [{'enabled': '1',
|
||||
'ip': '192.168.0.100',
|
||||
'netmask': '255.255.255.0'}],
|
||||
'label': 'fake',
|
||||
'mac': 'DE:AD:BE:EF:00:00',
|
||||
'rxtx_cap': 3})]
|
||||
conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'),
|
||||
network_info)
|
||||
|
||||
|
||||
class XenAPIDetermineDiskImageTestCase(test.TestCase):
|
||||
|
@ -23,6 +23,7 @@ Handling of VM disk images.
|
||||
|
||||
from nova import context
|
||||
from nova import flags
|
||||
from nova.image import glance as glance_image_service
|
||||
import nova.image
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
@ -42,13 +43,3 @@ def fetch(image_href, path, _user, _project):
|
||||
elevated = context.get_admin_context()
|
||||
metadata = image_service.get(elevated, image_id, image_file)
|
||||
return metadata
|
||||
|
||||
|
||||
# TODO(vish): xenapi should use the glance client code directly instead
|
||||
# of retrieving the image using this method.
|
||||
def image_url(image):
|
||||
if FLAGS.image_service == "nova.image.glance.GlanceImageService":
|
||||
return "http://%s:%s/images/%s" % (FLAGS.glance_host,
|
||||
FLAGS.glance_port, image)
|
||||
return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
|
||||
image)
|
||||
|
@ -33,6 +33,7 @@ import glance.client
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
import nova.image
|
||||
from nova.image import glance as glance_image_service
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
from nova.auth.manager import AuthManager
|
||||
@ -156,7 +157,6 @@ class VMHelper(HelperBase):
|
||||
rec['PV_ramdisk'] = ramdisk
|
||||
else:
|
||||
# 2. Use kernel within the image
|
||||
rec['PV_args'] = 'clocksource=jiffies'
|
||||
rec['PV_bootloader'] = 'pygrub'
|
||||
else:
|
||||
# 3. Using hardware virtualization
|
||||
@ -358,10 +358,12 @@ class VMHelper(HelperBase):
|
||||
|
||||
os_type = instance.os_type or FLAGS.default_os_type
|
||||
|
||||
glance_host, glance_port = \
|
||||
glance_image_service.pick_glance_api_server()
|
||||
params = {'vdi_uuids': vdi_uuids,
|
||||
'image_id': image_id,
|
||||
'glance_host': FLAGS.glance_host,
|
||||
'glance_port': FLAGS.glance_port,
|
||||
'glance_host': glance_host,
|
||||
'glance_port': glance_port,
|
||||
'sr_path': cls.get_sr_path(session),
|
||||
'os_type': os_type}
|
||||
|
||||
@ -409,9 +411,11 @@ class VMHelper(HelperBase):
|
||||
# here (under Python 2.6+) and pass them as arguments
|
||||
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
|
||||
|
||||
glance_host, glance_port = \
|
||||
glance_image_service.pick_glance_api_server()
|
||||
params = {'image_id': image,
|
||||
'glance_host': FLAGS.glance_host,
|
||||
'glance_port': FLAGS.glance_port,
|
||||
'glance_host': glance_host,
|
||||
'glance_port': glance_port,
|
||||
'uuid_stack': uuid_stack,
|
||||
'sr_path': cls.get_sr_path(session)}
|
||||
|
||||
@ -576,7 +580,8 @@ class VMHelper(HelperBase):
|
||||
Returns: A single filename if image_type is KERNEL_RAMDISK
|
||||
A list of dictionaries that describe VDIs, otherwise
|
||||
"""
|
||||
url = images.image_url(image)
|
||||
url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
|
||||
image)
|
||||
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
|
||||
if image_type == ImageType.KERNEL_RAMDISK:
|
||||
fn = 'get_kernel'
|
||||
|
@ -88,11 +88,12 @@ class VMOps(object):
|
||||
vm_ref = VMHelper.lookup(self._session, instance.name)
|
||||
self._start(instance, vm_ref)
|
||||
|
||||
def finish_resize(self, instance, disk_info):
|
||||
def finish_resize(self, instance, disk_info, network_info):
|
||||
vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
|
||||
disk_info['cow'])
|
||||
vm_ref = self._create_vm(instance,
|
||||
[dict(vdi_type='os', vdi_uuid=vdi_uuid)])
|
||||
[dict(vdi_type='os', vdi_uuid=vdi_uuid)],
|
||||
network_info)
|
||||
self.resize_instance(instance, vdi_uuid)
|
||||
self._spawn(instance, vm_ref)
|
||||
|
||||
|
@ -202,9 +202,9 @@ class XenAPIConnection(driver.ComputeDriver):
|
||||
"""Reverts a resize, powering back on the instance"""
|
||||
self._vmops.revert_resize(instance)
|
||||
|
||||
def finish_resize(self, instance, disk_info):
|
||||
def finish_resize(self, instance, disk_info, network_info):
|
||||
"""Completes a resize, turning on the migrated instance"""
|
||||
self._vmops.finish_resize(instance, disk_info)
|
||||
self._vmops.finish_resize(instance, disk_info, network_info)
|
||||
|
||||
def snapshot(self, instance, image_id):
|
||||
""" Create snapshot from a running VM instance """
|
||||
|