* Updated readme file with installation of suds-0.4 through easy_install.
* Removed pass functions
* Fixed pep8 errors
* A few bug fixes and other commits

Also rebased this branch to nova revision 761.
commit 67e63ba87f
.mailmap (2)
@@ -15,10 +15,12 @@
<corywright@gmail.com> <cory.wright@rackspace.com>
<devin.carlen@gmail.com> <devcamcar@illian.local>
<ewan.mellor@citrix.com> <emellor@silver>
<itoumsn@nttdata.co.jp> <itoumsn@shayol>
<jaypipes@gmail.com> <jpipes@serialcoder>
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
<josh@jk0.org> <josh.kearney@rackspace.com>
<justin@fathomdb.com> <justinsb@justinsb-desktop>
<justin@fathomdb.com> <superstack@superstack.org>
<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
Authors (3)
@@ -31,7 +31,7 @@ John Dewey <john@dewey.ws>
Jonathan Bryce <jbryce@jbryce.com>
Jordan Rinke <jordan@openstack.org>
Josh Durgin <joshd@hq.newdream.net>
Josh Kearney <josh.kearney@rackspace.com>
Josh Kearney <josh@jk0.org>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Kei Masumoto <masumotok@nttdata.co.jp>
@@ -39,6 +39,7 @@ Ken Pepple <ken.pepple@gmail.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monsyne Dragon <mdragon@rackspace.com>
@@ -84,6 +84,7 @@ from nova import utils
from nova.api.ec2.cloud import ec2_id_to_id
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova.compute import instance_types
from nova.db import migration

FLAGS = flags.FLAGS
@@ -661,6 +662,79 @@ class VolumeCommands(object):
                          "mountpoint": volume['mountpoint']}})


class InstanceTypeCommands(object):
    """Class for managing instance types / flavors."""

    def _print_instance_types(self, n, val):
        deleted = ('', ', inactive')[val["deleted"] == 1]
        print ("%s: Memory: %sMB, VCPUS: %s, Storage: %sGB, FlavorID: %s, "
               "Swap: %sGB, RXTX Quota: %sGB, RXTX Cap: %sMB%s") % (
               n, val["memory_mb"], val["vcpus"], val["local_gb"],
               val["flavorid"], val["swap"], val["rxtx_quota"],
               val["rxtx_cap"], deleted)

    def create(self, name, memory, vcpus, local_gb, flavorid,
               swap=0, rxtx_quota=0, rxtx_cap=0):
        """Creates instance types / flavors
        arguments: name memory vcpus local_gb flavorid [swap] [rxtx_quota]
                   [rxtx_cap]
        """
        try:
            instance_types.create(name, memory, vcpus, local_gb,
                                  flavorid, swap, rxtx_quota, rxtx_cap)
        except exception.InvalidInputException, e:
            print "Must supply valid parameters to create instance type"
            print e
            sys.exit(1)
        except exception.DBError, e:
            print "DB Error: %s" % e
            sys.exit(2)
        except:
            print "Unknown error"
            sys.exit(3)
        else:
            print "%s created" % name

    def delete(self, name, purge=None):
        """Marks instance types / flavors as deleted
        arguments: name"""
        try:
            if purge == "--purge":
                instance_types.purge(name)
                verb = "purged"
            else:
                instance_types.destroy(name)
                verb = "deleted"
        except exception.ApiError:
            print "Valid instance type name is required"
            sys.exit(1)
        except exception.DBError, e:
            print "DB Error: %s" % e
            sys.exit(2)
        except:
            sys.exit(3)
        else:
            print "%s %s" % (name, verb)

    def list(self, name=None):
        """Lists all active or specific instance types / flavors
        arguments: [name]"""
        try:
            if name == None:
                inst_types = instance_types.get_all_types()
            elif name == "--all":
                inst_types = instance_types.get_all_types(1)
            else:
                inst_types = instance_types.get_instance_type(name)
        except exception.DBError, e:
            _db_error(e)
        if isinstance(inst_types.values()[0], dict):
            for k, v in inst_types.iteritems():
                self._print_instance_types(k, v)
        else:
            self._print_instance_types(name, inst_types)


CATEGORIES = [
    ('user', UserCommands),
    ('project', ProjectCommands),
@@ -673,7 +747,9 @@ CATEGORIES = [
    ('service', ServiceCommands),
    ('log', LogCommands),
    ('db', DbCommands),
    ('volume', VolumeCommands)]
    ('volume', VolumeCommands),
    ('instance_type', InstanceTypeCommands),
    ('flavor', InstanceTypeCommands)]


def lazy_match(name, key_value_tuples):
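The new CATEGORIES entries above register InstanceTypeCommands under both the "instance_type" and "flavor" names. As a minimal, illustrative sketch only (the run() helper below is hypothetical, not nova-manage's actual main()), a table like this can dispatch either spelling to the same class::

    def run(category, action, *args):
        # Look the category up in the (name, class) table and call the action.
        command_class = dict(CATEGORIES)[category]   # e.g. InstanceTypeCommands
        getattr(command_class(), action)(*args)      # e.g. .list() or .create(...)

    # "nova-manage flavor list" and "nova-manage instance_type list" resolve to
    # the same InstanceTypeCommands.list(), because both names map to one class.
    run('flavor', 'list')
    run('instance_type', 'list')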
@@ -40,6 +40,9 @@ source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
source/api/nova..db.sqlalchemy.migration.rst
source/api/nova..db.sqlalchemy.models.rst
source/api/nova..db.sqlalchemy.session.rst
@@ -98,6 +101,7 @@ source/api/nova..tests.test_compute.rst
source/api/nova..tests.test_console.rst
source/api/nova..tests.test_direct.rst
source/api/nova..tests.test_flags.rst
source/api/nova..tests.test_instance_types.rst
source/api/nova..tests.test_localization.rst
source/api/nova..tests.test_log.rst
source/api/nova..tests.test_middleware.rst
@@ -107,7 +111,9 @@ source/api/nova..tests.test_quota.rst
source/api/nova..tests.test_rpc.rst
source/api/nova..tests.test_scheduler.rst
source/api/nova..tests.test_service.rst
source/api/nova..tests.test_test.rst
source/api/nova..tests.test_twistd.rst
source/api/nova..tests.test_utils.rst
source/api/nova..tests.test_virt.rst
source/api/nova..tests.test_volume.rst
source/api/nova..tests.test_xenapi.rst
@@ -176,6 +182,9 @@ source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
source/api/nova..db.sqlalchemy.migration.rst
source/api/nova..db.sqlalchemy.models.rst
source/api/nova..db.sqlalchemy.session.rst
@@ -234,6 +243,7 @@ source/api/nova..tests.test_compute.rst
source/api/nova..tests.test_console.rst
source/api/nova..tests.test_direct.rst
source/api/nova..tests.test_flags.rst
source/api/nova..tests.test_instance_types.rst
source/api/nova..tests.test_localization.rst
source/api/nova..tests.test_log.rst
source/api/nova..tests.test_middleware.rst
@@ -243,142 +253,9 @@ source/api/nova..tests.test_quota.rst
source/api/nova..tests.test_rpc.rst
source/api/nova..tests.test_scheduler.rst
source/api/nova..tests.test_service.rst
source/api/nova..tests.test_test.rst
source/api/nova..tests.test_twistd.rst
source/api/nova..tests.test_virt.rst
source/api/nova..tests.test_volume.rst
source/api/nova..tests.test_xenapi.rst
source/api/nova..tests.xenapi.stubs.rst
source/api/nova..twistd.rst
source/api/nova..utils.rst
source/api/nova..version.rst
source/api/nova..virt.connection.rst
source/api/nova..virt.disk.rst
source/api/nova..virt.fake.rst
source/api/nova..virt.hyperv.rst
source/api/nova..virt.images.rst
source/api/nova..virt.libvirt_conn.rst
source/api/nova..virt.xenapi.fake.rst
source/api/nova..virt.xenapi.network_utils.rst
source/api/nova..virt.xenapi.vm_utils.rst
source/api/nova..virt.xenapi.vmops.rst
source/api/nova..virt.xenapi.volume_utils.rst
source/api/nova..virt.xenapi.volumeops.rst
source/api/nova..virt.xenapi_conn.rst
source/api/nova..volume.api.rst
source/api/nova..volume.driver.rst
source/api/nova..volume.manager.rst
source/api/nova..volume.san.rst
source/api/nova..wsgi.rst
source/api/nova..adminclient.rst
source/api/nova..api.direct.rst
source/api/nova..api.ec2.admin.rst
source/api/nova..api.ec2.apirequest.rst
source/api/nova..api.ec2.cloud.rst
source/api/nova..api.ec2.metadatarequesthandler.rst
source/api/nova..api.openstack.auth.rst
source/api/nova..api.openstack.backup_schedules.rst
source/api/nova..api.openstack.common.rst
source/api/nova..api.openstack.consoles.rst
source/api/nova..api.openstack.faults.rst
source/api/nova..api.openstack.flavors.rst
source/api/nova..api.openstack.images.rst
source/api/nova..api.openstack.servers.rst
source/api/nova..api.openstack.shared_ip_groups.rst
source/api/nova..api.openstack.zones.rst
source/api/nova..auth.dbdriver.rst
source/api/nova..auth.fakeldap.rst
source/api/nova..auth.ldapdriver.rst
source/api/nova..auth.manager.rst
source/api/nova..auth.signer.rst
source/api/nova..cloudpipe.pipelib.rst
source/api/nova..compute.api.rst
source/api/nova..compute.instance_types.rst
source/api/nova..compute.manager.rst
source/api/nova..compute.monitor.rst
source/api/nova..compute.power_state.rst
source/api/nova..console.api.rst
source/api/nova..console.fake.rst
source/api/nova..console.manager.rst
source/api/nova..console.xvp.rst
source/api/nova..context.rst
source/api/nova..crypto.rst
source/api/nova..db.api.rst
source/api/nova..db.base.rst
source/api/nova..db.migration.rst
source/api/nova..db.sqlalchemy.api.rst
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
source/api/nova..db.sqlalchemy.migration.rst
source/api/nova..db.sqlalchemy.models.rst
source/api/nova..db.sqlalchemy.session.rst
source/api/nova..exception.rst
source/api/nova..fakememcache.rst
source/api/nova..fakerabbit.rst
source/api/nova..flags.rst
source/api/nova..image.glance.rst
source/api/nova..image.local.rst
source/api/nova..image.s3.rst
source/api/nova..image.service.rst
source/api/nova..log.rst
source/api/nova..manager.rst
source/api/nova..network.api.rst
source/api/nova..network.linux_net.rst
source/api/nova..network.manager.rst
source/api/nova..objectstore.bucket.rst
source/api/nova..objectstore.handler.rst
source/api/nova..objectstore.image.rst
source/api/nova..objectstore.stored.rst
source/api/nova..quota.rst
source/api/nova..rpc.rst
source/api/nova..scheduler.chance.rst
source/api/nova..scheduler.driver.rst
source/api/nova..scheduler.manager.rst
source/api/nova..scheduler.simple.rst
source/api/nova..scheduler.zone.rst
source/api/nova..service.rst
source/api/nova..test.rst
source/api/nova..tests.api.openstack.fakes.rst
source/api/nova..tests.api.openstack.test_adminapi.rst
source/api/nova..tests.api.openstack.test_api.rst
source/api/nova..tests.api.openstack.test_auth.rst
source/api/nova..tests.api.openstack.test_common.rst
source/api/nova..tests.api.openstack.test_faults.rst
source/api/nova..tests.api.openstack.test_flavors.rst
source/api/nova..tests.api.openstack.test_images.rst
source/api/nova..tests.api.openstack.test_ratelimiting.rst
source/api/nova..tests.api.openstack.test_servers.rst
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
source/api/nova..tests.api.openstack.test_zones.rst
source/api/nova..tests.api.test_wsgi.rst
source/api/nova..tests.db.fakes.rst
source/api/nova..tests.declare_flags.rst
source/api/nova..tests.fake_flags.rst
source/api/nova..tests.glance.stubs.rst
source/api/nova..tests.hyperv_unittest.rst
source/api/nova..tests.objectstore_unittest.rst
source/api/nova..tests.real_flags.rst
source/api/nova..tests.runtime_flags.rst
source/api/nova..tests.test_access.rst
source/api/nova..tests.test_api.rst
source/api/nova..tests.test_auth.rst
source/api/nova..tests.test_cloud.rst
source/api/nova..tests.test_compute.rst
source/api/nova..tests.test_console.rst
source/api/nova..tests.test_direct.rst
source/api/nova..tests.test_flags.rst
source/api/nova..tests.test_localization.rst
source/api/nova..tests.test_log.rst
source/api/nova..tests.test_middleware.rst
source/api/nova..tests.test_misc.rst
source/api/nova..tests.test_network.rst
source/api/nova..tests.test_quota.rst
source/api/nova..tests.test_rpc.rst
source/api/nova..tests.test_scheduler.rst
source/api/nova..tests.test_service.rst
source/api/nova..tests.test_twistd.rst
source/api/nova..tests.test_utils.rst
source/api/nova..tests.test_virt.rst
source/api/nova..tests.test_volume.rst
source/api/nova..tests.test_xenapi.rst
@@ -43,6 +43,9 @@
nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst
nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst
nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst
nova..db.sqlalchemy.migration.rst
nova..db.sqlalchemy.models.rst
nova..db.sqlalchemy.session.rst
@@ -101,6 +104,7 @@
nova..tests.test_console.rst
nova..tests.test_direct.rst
nova..tests.test_flags.rst
nova..tests.test_instance_types.rst
nova..tests.test_localization.rst
nova..tests.test_log.rst
nova..tests.test_middleware.rst
@@ -110,7 +114,9 @@
nova..tests.test_rpc.rst
nova..tests.test_scheduler.rst
nova..tests.test_service.rst
nova..tests.test_test.rst
nova..tests.test_twistd.rst
nova..tests.test_utils.rst
nova..tests.test_virt.rst
nova..tests.test_volume.rst
nova..tests.test_xenapi.rst
@@ -0,0 +1,6 @@
The :mod:`nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata` Module
==============================================================================
.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata
    :members:
    :undoc-members:
    :show-inheritance:
@@ -0,0 +1,6 @@
The :mod:`nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes` Module
==============================================================================
.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes
    :members:
    :undoc-members:
    :show-inheritance:
@@ -0,0 +1,6 @@
The :mod:`nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types` Module
==============================================================================
.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types
    :members:
    :undoc-members:
    :show-inheritance:
doc/source/api/nova..tests.test_instance_types.rst (6, new file)
@@ -0,0 +1,6 @@
The :mod:`nova..tests.test_instance_types` Module
==============================================================================
.. automodule:: nova..tests.test_instance_types
    :members:
    :undoc-members:
    :show-inheritance:
doc/source/api/nova..tests.test_test.rst (6, new file)
@@ -0,0 +1,6 @@
The :mod:`nova..tests.test_test` Module
==============================================================================
.. automodule:: nova..tests.test_test
    :members:
    :undoc-members:
    :show-inheritance:
doc/source/api/nova..tests.test_utils.rst (6, new file)
@@ -0,0 +1,6 @@
The :mod:`nova..tests.test_utils` Module
==============================================================================
.. automodule:: nova..tests.test_utils
    :members:
    :undoc-members:
    :show-inheritance:
@@ -179,6 +179,42 @@ Nova Floating IPs

    Displays a list of all floating IP addresses.

Nova Flavor
~~~~~~~~~~~

``nova-manage flavor list``

    Outputs a list of all active flavors to the screen.

``nova-manage flavor list --all``

    Outputs a list of all flavors (active and inactive) to the screen.

``nova-manage flavor create <name> <memory> <vCPU> <local_storage> <flavorID> <(optional) swap> <(optional) RXTX Quota> <(optional) RXTX Cap>``

    Creates a flavor with the following positional arguments:
    * memory (expressed in megabytes)
    * vcpu(s) (integer)
    * local storage (expressed in gigabytes)
    * flavorid (unique integer)
    * swap space (expressed in megabytes, defaults to zero, optional)
    * RXTX quotas (expressed in gigabytes, defaults to zero, optional)
    * RXTX cap (expressed in gigabytes, defaults to zero, optional)

``nova-manage flavor delete <name>``

    Deletes the flavor with the name <name>. This marks the flavor as inactive, so it can no longer be used to launch instances; the record stays in the database for archival and billing purposes.

``nova-manage flavor delete <name> --purge``

    Purges the flavor with the name <name>. This removes the flavor from the database entirely.


Nova Instance_type
~~~~~~~~~~~~~~~~~~

    The instance_type command is provided as an alias for the flavor command. All of the flavor subcommands and arguments described above can be used with it.


FILES
========

@@ -64,6 +64,11 @@ Concept: Instances

An 'instance' is a word for a virtual machine that runs inside the cloud.

Concept: Instance Type
----------------------

An 'instance type' describes the compute, memory and storage capacity of nova computing instances. In layman's terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching.

Concept: System Architecture
----------------------------

doc/source/runnova/managing.instance.types.rst (84, new file)
@@ -0,0 +1,84 @@
..
      Copyright 2011 Ken Pepple

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
      License for the specific language governing permissions and limitations
      under the License.

Managing Instance Types and Flavors
===================================

What are Instance Types or Flavors?
------------------------------------

Instance types describe the compute, memory and storage capacity of nova computing instances. In layman's terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching. In the EC2 API, these are called by names such as "m1.large" or "m1.tiny", while the OpenStack API terms these "flavors" with names like "512 MB Server".

In Nova, "flavor" and "instance type" are equivalent terms. When you create an EC2 instance type, you are also creating an OpenStack API flavor. To reduce repetition, the rest of this document will refer to these as instance types.

Instance types can be in either the active or inactive state:
* Active instance types are available to be used for launching instances
* Inactive instance types are not available for launching instances

In the current (Cactus) version of nova, instance types can only be created by the nova administrator through the nova-manage command. Future versions of nova (in concert with the OpenStack API or EC2 API) may expose this functionality directly to users.

Basic Management
----------------

Instance types / flavors are managed through the nova-manage binary with
the "instance_type" command and an appropriate subcommand. Note that you can also use
the "flavor" command as a synonym for "instance_type".

To see all currently active instance types, use the list subcommand::

  # nova-manage instance_type list
  m1.medium: Memory: 4096MB, VCPUS: 2, Storage: 40GB, FlavorID: 3, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
  m1.large: Memory: 8192MB, VCPUS: 4, Storage: 80GB, FlavorID: 4, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
  m1.tiny: Memory: 512MB, VCPUS: 1, Storage: 0GB, FlavorID: 1, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
  m1.xlarge: Memory: 16384MB, VCPUS: 8, Storage: 160GB, FlavorID: 5, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
  m1.small: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB

By default, the list subcommand only shows active instance types. To see all instance types (inactive and active), use the list subcommand with the "--all" flag::

  # nova-manage instance_type list --all
  m1.medium: Memory: 4096MB, VCPUS: 2, Storage: 40GB, FlavorID: 3, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
  m1.large: Memory: 8192MB, VCPUS: 4, Storage: 80GB, FlavorID: 4, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
  m1.tiny: Memory: 512MB, VCPUS: 1, Storage: 0GB, FlavorID: 1, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
  m1.xlarge: Memory: 16384MB, VCPUS: 8, Storage: 160GB, FlavorID: 5, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
  m1.small: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB
  m1.deleted: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB, inactive

To create an instance type, use the "create" subcommand with the following positional arguments:
* memory (expressed in megabytes)
* vcpu(s) (integer)
* local storage (expressed in gigabytes)
* flavorid (unique integer)
* swap space (expressed in megabytes, defaults to zero, optional)
* RXTX quotas (expressed in gigabytes, defaults to zero, optional)
* RXTX cap (expressed in gigabytes, defaults to zero, optional)

The following example creates an instance type named "m1.xxlarge"::

  # nova-manage instance_type create m1.xxlarge 32768 16 320 0 0 0
  m1.xxlarge created

To delete an instance type, use the "delete" subcommand and specify the name::

  # nova-manage instance_type delete m1.xxlarge
  m1.xxlarge deleted

Please note that the "delete" command only marks the instance type as
inactive in the database; it does not actually remove the instance type. This is done
to preserve the instance type definition for long running instances (which may not
terminate for months or years). If you are sure that you want to delete this instance
type from the database, pass the "--purge" flag after the name::

  # nova-manage instance_type delete m1.xxlarge --purge
  m1.xxlarge purged
@@ -55,11 +55,8 @@ Python dependencies

::

  sudo apt-get install python-setuptools
  wget https://fedorahosted.org/releases/s/u/suds/python-suds-0.4.tar.gz
  tar -zxvf python-suds-0.4.tar.gz
  cd python-suds-0.4
  sudo python setup.py install
  easy_install suds==0.4


Configuration flags required for nova-compute
---------------------------------------------

@@ -78,20 +78,27 @@ def _bytes2int(bytes):

def _parse_network_details(machine_id):
    """Parse the machine.id field to get MAC, IP, Netmask and Gateway fields
    machine.id is of the form MAC;IP;Netmask;Gateway; where ';' is
    the separator.
    machine.id is of the form MAC;IP;Netmask;Gateway;Broadcast;DNS1,DNS2
    where ';' is the separator.
    """
    network_details = []
    if machine_id[1].strip() == NO_MACHINE_ID:
    if machine_id[1].strip() == "1":
        pass
    else:
        network_info_list = machine_id[0].split(';')
        assert len(network_info_list) % 4 == 0
        for i in xrange(0, len(network_info_list) / 4):
            network_details.append((network_info_list[i].strip().lower(),
                                    network_info_list[i + 1].strip(),
                                    network_info_list[i + 2].strip(),
                                    network_info_list[i + 3].strip()))
        assert len(network_info_list) % 6 == 0
        no_grps = len(network_info_list) / 6
        i = 0
        while i < no_grps:
            k = i * 6
            network_details.append((
                network_info_list[k].strip().lower(),
                network_info_list[k + 1].strip(),
                network_info_list[k + 2].strip(),
                network_info_list[k + 3].strip(),
                network_info_list[k + 4].strip(),
                network_info_list[k + 5].strip().split(',')))
            i += 1
    return network_details

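For clarity, here is a standalone illustration of the new machine.id layout parsed above (MAC;IP;Netmask;Gateway;Broadcast;DNS1,DNS2 per interface); the sample values below are made up::

    sample = ("00:50:56:aa:bb:cc;10.0.0.5;255.255.255.0;10.0.0.1;"
              "10.0.0.255;8.8.8.8,8.8.4.4")
    fields = sample.split(';')
    assert len(fields) % 6 == 0          # six fields per network interface
    for i in xrange(len(fields) / 6):
        k = i * 6
        mac, ip, netmask, gateway, broadcast, dns = \
            [f.strip() for f in fields[k:k + 6]]
        # the DNS field is itself comma-separated
        print mac.lower(), ip, netmask, gateway, broadcast, dns.split(',')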
@@ -218,7 +225,7 @@ def _execute(cmd_list, process_input=None, check_exit_code=True):
    return result


def _windows_set_ipaddress():
def _windows_set_networking():
    """Set IP address for the windows VM"""
    program_files = os.environ.get('PROGRAMFILES')
    program_files_x86 = os.environ.get('PROGRAMFILES(X86)')
@@ -240,7 +247,8 @@ def _windows_set_ipaddress():
        cmd = ['"' + vmware_tools_bin + '"', '--cmd', 'machine.id.get']
        for network_detail in _parse_network_details(_execute(cmd,
                                                     check_exit_code=False)):
            mac_address, ip_address, subnet_mask, gateway = network_detail
            mac_address, ip_address, subnet_mask, gateway, broadcast,\
                dns_servers = network_detail
            adapter_name, current_ip_address = \
                _get_win_adapter_name_and_ip_address(mac_address)
            if adapter_name and not ip_address == current_ip_address:
@@ -248,11 +256,65 @@ def _windows_set_ipaddress():
                    'name="%s"' % adapter_name, 'source=static', ip_address,
                    subnet_mask, gateway, '1']
                _execute(cmd)
                #Windows doesn't let you manually set the broadcast address
                for dns_server in dns_servers:
                    if dns_server:
                        cmd = ['netsh', 'interface', 'ip', 'add', 'dns',
                               'name="%s"' % adapter_name, dns_server]
                        _execute(cmd)
    else:
        logging.warn(_("VMware Tools is not installed"))


def _linux_set_ipaddress():
def _filter_duplicates(all_entries):
    final_list = []
    for entry in all_entries:
        if entry and entry not in final_list:
            final_list.append(entry)
    return final_list


def _set_rhel_networking(network_details=[]):
    all_dns_servers = []
    for network_detail in network_details:
        mac_address, ip_address, subnet_mask, gateway, broadcast,\
            dns_servers = network_detail
        all_dns_servers.extend(dns_servers)
        adapter_name, current_ip_address = \
            _get_linux_adapter_name_and_ip_address(mac_address)
        if adapter_name and not ip_address == current_ip_address:
            interface_file_name = \
                '/etc/sysconfig/network-scripts/ifcfg-%s' % adapter_name
            #Remove file
            os.remove(interface_file_name)
            #Touch file
            _execute(['touch', interface_file_name])
            interface_file = open(interface_file_name, 'w')
            interface_file.write('\nDEVICE=%s' % adapter_name)
            interface_file.write('\nUSERCTL=yes')
            interface_file.write('\nONBOOT=yes')
            interface_file.write('\nBOOTPROTO=static')
            interface_file.write('\nBROADCAST=%s' % broadcast)
            interface_file.write('\nNETWORK=')
            interface_file.write('\nGATEWAY=%s' % gateway)
            interface_file.write('\nNETMASK=%s' % subnet_mask)
            interface_file.write('\nIPADDR=%s' % ip_address)
            interface_file.write('\nMACADDR=%s' % mac_address)
            interface_file.close()
    if all_dns_servers:
        dns_file_name = "/etc/resolv.conf"
        os.remove(dns_file_name)
        _execute(['touch', dns_file_name])
        dns_file = open(dns_file_name, 'w')
        dns_file.write("; generated by OpenStack guest tools")
        unique_entries = _filter_duplicates(all_dns_servers)
        for dns_server in unique_entries:
            dns_file.write("\nnameserver %s" % dns_server)
        dns_file.close()
    _execute(['/sbin/service', 'network', 'restart'])


def _linux_set_networking():
    """Set IP address for the Linux VM"""
    vmware_tools_bin = None
    if os.path.exists('/usr/sbin/vmtoolsd'):
@@ -265,38 +327,18 @@ def _linux_set_ipaddress():
        vmware_tools_bin = '/usr/bin/vmware-guestd'
    if vmware_tools_bin:
        cmd = [vmware_tools_bin, '--cmd', 'machine.id.get']
        for network_detail in _parse_network_details(_execute(cmd,
                                                     check_exit_code=False)):
            mac_address, ip_address, subnet_mask, gateway = network_detail
            adapter_name, current_ip_address = \
                _get_linux_adapter_name_and_ip_address(mac_address)
            if adapter_name and not ip_address == current_ip_address:
                interface_file_name = \
                    '/etc/sysconfig/network-scripts/ifcfg-%s' % adapter_name
                #Remove file
                os.remove(interface_file_name)
                #Touch file
                _execute(['touch', interface_file_name])
                interface_file = open(interface_file_name, 'w')
                interface_file.write('\nDEVICE=%s' % adapter_name)
                interface_file.write('\nUSERCTL=yes')
                interface_file.write('\nONBOOT=yes')
                interface_file.write('\nBOOTPROTO=static')
                interface_file.write('\nBROADCAST=')
                interface_file.write('\nNETWORK=')
                interface_file.write('\nNETMASK=%s' % subnet_mask)
                interface_file.write('\nIPADDR=%s' % ip_address)
                interface_file.write('\nMACADDR=%s' % mac_address)
                interface_file.close()
                _execute(['/sbin/service', 'network' 'restart'])
        network_details = _parse_network_details(_execute(cmd,
                                                 check_exit_code=False))
        #TODO: For other distros like ubuntu, suse, debian, BSD, etc.
        _set_rhel_networking(network_details)
    else:
        logging.warn(_("VMware Tools is not installed"))

if __name__ == '__main__':
    pltfrm = sys.platform
    if pltfrm == PLATFORM_WIN:
        _windows_set_ipaddress()
        _windows_set_networking()
    elif pltfrm == PLATFORM_LINUX:
        _linux_set_ipaddress()
        _linux_set_networking()
    else:
        raise NotImplementedError(_("Platform not implemented: '%s'") % pltfrm)
@@ -29,7 +29,6 @@ from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.compute import instance_types


FLAGS = flags.FLAGS
@@ -80,8 +79,8 @@ def host_dict(host, compute_service, instances, volume_service, volumes, now):
    return rv


def instance_dict(name, inst):
    return {'name': name,
def instance_dict(inst):
    return {'name': inst['name'],
            'memory_mb': inst['memory_mb'],
            'vcpus': inst['vcpus'],
            'disk_gb': inst['local_gb'],
@@ -115,9 +114,9 @@ class AdminController(object):
    def __str__(self):
        return 'AdminController'

    def describe_instance_types(self, _context, **_kwargs):
        return {'instanceTypeSet': [instance_dict(n, v) for n, v in
                                    instance_types.INSTANCE_TYPES.iteritems()]}
    def describe_instance_types(self, context, **_kwargs):
        """Returns all active instance types data (vcpus, memory, etc.)"""
        return {'instanceTypeSet': [db.instance_type_get_all(context)]}

    def describe_user(self, _context, name, **_kwargs):
        """Returns user data, including access and secret keys."""
@@ -52,7 +52,23 @@ def _database_to_isoformat(datetimeobj):


def _try_convert(value):
    """Return a non-string if possible"""
    """Return a non-string from a string or unicode, if possible.

    ============= =====================================================
    When value is returns
    ============= =====================================================
    zero-length   ''
    'None'        None
    'True'        True
    'False'       False
    '0', '-0'     0
    0xN, -0xN     int from hex (positive) (N is any number)
    0bN, -0bN     int from binary (positive) (N is any number)
    * try conversion to int, float, complex, fallback value

    """
    if len(value) == 0:
        return ''
    if value == 'None':
        return None
    if value == 'True':
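The conversion table above can be read as follows; this is a simplified, standalone sketch of the documented rules, not nova's _try_convert implementation::

    def try_convert_sketch(value):
        if len(value) == 0:
            return ''
        literals = {'None': None, 'True': True, 'False': False}
        if value in literals:
            return literals[value]
        for caster in (int, float, complex):
            try:
                return caster(value)
            except ValueError:
                pass
        try:
            return int(value, 0)      # covers 0xN / 0bN style strings
        except ValueError:
            return value              # fall back to the original string

    print try_convert_sketch('0x10'), try_convert_sketch('False')   # 16 False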
@@ -298,7 +298,7 @@ class CloudController(object):
                'keyFingerprint': key_pair['fingerprint'],
            })

        return {'keypairsSet': result}
        return {'keySet': result}

    def create_key_pair(self, context, key_name, **kwargs):
        LOG.audit(_("Create key pair %s"), key_name, context=context)
@@ -838,14 +838,14 @@ class CloudController(object):
        self.compute_api.unrescue(context, instance_id=instance_id)
        return True

    def update_instance(self, context, ec2_id, **kwargs):
    def update_instance(self, context, instance_id, **kwargs):
        updatable_fields = ['display_name', 'display_description']
        changes = {}
        for field in updatable_fields:
            if field in kwargs:
                changes[field] = kwargs[field]
        if changes:
            instance_id = ec2_id_to_id(ec2_id)
            instance_id = ec2_id_to_id(instance_id)
            self.compute_api.update(context, instance_id=instance_id, **kwargs)
        return True

@@ -74,12 +74,15 @@ class APIRouter(wsgi.Router):
        server_members = {'action': 'POST'}
        if FLAGS.allow_admin_api:
            LOG.debug(_("Including admin operations in API."))

            server_members['pause'] = 'POST'
            server_members['unpause'] = 'POST'
            server_members["diagnostics"] = "GET"
            server_members["actions"] = "GET"
            server_members['suspend'] = 'POST'
            server_members['resume'] = 'POST'
            server_members['rescue'] = 'POST'
            server_members['unrescue'] = 'POST'
            server_members['reset_network'] = 'POST'
            server_members['inject_network_info'] = 'POST'

@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.

import webob.exc

from nova import exception


@@ -27,7 +29,8 @@ def limited(items, request, max_limit=1000):
    GET variables. 'offset' is where to start in the list,
    and 'limit' is the maximum number of items to return. If
    'limit' is not specified, 0, or > max_limit, we default
    to max_limit.
    to max_limit. Negative values for either offset or limit
    will cause exc.HTTPBadRequest() exceptions to be raised.
    @kwarg max_limit: The maximum number of items to return from 'items'
    """
    try:
@@ -40,6 +43,9 @@ def limited(items, request, max_limit=1000):
    except ValueError:
        limit = max_limit

    if offset < 0 or limit < 0:
        raise webob.exc.HTTPBadRequest()

    limit = min(max_limit, limit or max_limit)
    range_end = offset + limit
    return items[offset:range_end]
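A small standalone sketch of the pagination contract described in the docstring above (it mirrors the slicing rules; it is not the limited() function itself)::

    def limited_sketch(items, offset, limit, max_limit=1000):
        if offset < 0 or limit < 0:
            raise ValueError("negative offset/limit is rejected")
        limit = min(max_limit, limit or max_limit)   # 0 or too-large falls back
        return items[offset:offset + limit]

    print limited_sketch(range(10), 2, 3)    # [2, 3, 4]
    print limited_sketch(range(10), 0, 0)    # the whole list (limit defaulted)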
@@ -17,6 +17,8 @@

from webob import exc

from nova import db
from nova import context
from nova.api.openstack import faults
from nova.api.openstack import common
from nova.compute import instance_types
@@ -39,19 +41,19 @@ class Controller(wsgi.Controller):

    def detail(self, req):
        """Return all flavors in detail."""
        items = [self.show(req, id)['flavor'] for id in self._all_ids()]
        items = common.limited(items, req)
        items = [self.show(req, id)['flavor'] for id in self._all_ids(req)]
        return dict(flavors=items)

    def show(self, req, id):
        """Return data about the given flavor id."""
        for name, val in instance_types.INSTANCE_TYPES.iteritems():
            if val['flavorid'] == int(id):
                item = dict(ram=val['memory_mb'], disk=val['local_gb'],
                            id=val['flavorid'], name=name)
                return dict(flavor=item)
        ctxt = req.environ['nova.context']
        values = db.instance_type_get_by_flavor_id(ctxt, id)
        return dict(flavor=values)
        raise faults.Fault(exc.HTTPNotFound())

    def _all_ids(self):
    def _all_ids(self, req):
        """Return the list of all flavorids."""
        return [i['flavorid'] for i in instance_types.INSTANCE_TYPES.values()]
        ctxt = req.environ['nova.context']
        inst_types = db.instance_type_get_all(ctxt)
        flavor_ids = [inst_types[i]['flavorid'] for i in inst_types.keys()]
        return sorted(flavor_ids)
@@ -335,6 +335,28 @@ class Controller(wsgi.Controller):
            return faults.Fault(exc.HTTPUnprocessableEntity())
        return exc.HTTPAccepted()

    def rescue(self, req, id):
        """Permit users to rescue the server."""
        context = req.environ["nova.context"]
        try:
            self.compute_api.rescue(context, id)
        except:
            readable = traceback.format_exc()
            LOG.exception(_("compute.api::rescue %s"), readable)
            return faults.Fault(exc.HTTPUnprocessableEntity())
        return exc.HTTPAccepted()

    def unrescue(self, req, id):
        """Permit users to unrescue the server."""
        context = req.environ["nova.context"]
        try:
            self.compute_api.unrescue(context, id)
        except:
            readable = traceback.format_exc()
            LOG.exception(_("compute.api::unrescue %s"), readable)
            return faults.Fault(exc.HTTPUnprocessableEntity())
        return exc.HTTPAccepted()

    def get_ajax_console(self, req, id):
        """ Returns a url to an instance's ajaxterm console. """
        try:
@@ -88,9 +88,9 @@ class API(base.Base):
               availability_zone=None, user_data=None, metadata=[],
               onset_files=None):
        """Create the number of instances requested if quota and
        other arguments check out ok.
        """
        type_data = instance_types.INSTANCE_TYPES[instance_type]
        other arguments check out ok."""

        type_data = instance_types.get_instance_type(instance_type)
        num_instances = quota.allowed_instances(context, max_count, type_data)
        if num_instances < min_count:
            pid = context.project_id
@@ -319,12 +319,12 @@ class API(base.Base):
        try:
            instance = self.get(context, instance_id)
        except exception.NotFound:
            LOG.warning(_("Instance %d was not found during terminate"),
            LOG.warning(_("Instance %s was not found during terminate"),
                        instance_id)
            raise

        if (instance['state_description'] == 'terminating'):
            LOG.warning(_("Instance %d is already being terminated"),
            LOG.warning(_("Instance %s is already being terminated"),
                        instance_id)
            return

@@ -4,6 +4,7 @@
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -21,30 +22,120 @@
The built-in instance properties.
"""

from nova import flags
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging

FLAGS = flags.FLAGS
INSTANCE_TYPES = {
    'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
    'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
    'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
    'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
    'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
LOG = logging.getLogger('nova.instance_types')


def create(name, memory, vcpus, local_gb, flavorid, swap=0,
           rxtx_quota=0, rxtx_cap=0):
    """Creates instance types / flavors
    arguments: name memory vcpus local_gb flavorid swap rxtx_quota rxtx_cap
    """
    for option in [memory, vcpus, local_gb, flavorid]:
        try:
            int(option)
        except ValueError:
            raise exception.InvalidInputException(
                _("create arguments must be positive integers"))
    if (int(memory) <= 0) or (int(vcpus) <= 0) or (int(local_gb) < 0):
        raise exception.InvalidInputException(
            _("create arguments must be positive integers"))

    try:
        db.instance_type_create(
            context.get_admin_context(),
            dict(name=name,
                 memory_mb=memory,
                 vcpus=vcpus,
                 local_gb=local_gb,
                 flavorid=flavorid,
                 swap=swap,
                 rxtx_quota=rxtx_quota,
                 rxtx_cap=rxtx_cap))
    except exception.DBError, e:
        LOG.exception(_('DB error: %s' % e))
        raise exception.ApiError(_("Cannot create instance type: %s" % name))


def destroy(name):
    """Marks instance types / flavors as deleted
    arguments: name"""
    if name == None:
        raise exception.InvalidInputException(_("No instance type specified"))
    else:
        try:
            db.instance_type_destroy(context.get_admin_context(), name)
        except exception.NotFound:
            LOG.exception(_('Instance type %s not found for deletion' % name))
            raise exception.ApiError(_("Unknown instance type: %s" % name))


def purge(name):
    """Removes instance types / flavors from database
    arguments: name"""
    if name == None:
        raise exception.InvalidInputException(_("No instance type specified"))
    else:
        try:
            db.instance_type_purge(context.get_admin_context(), name)
        except exception.NotFound:
            LOG.exception(_('Instance type %s not found for purge' % name))
            raise exception.ApiError(_("Unknown instance type: %s" % name))


def get_all_types(inactive=0):
    """Retrieves non-deleted instance_types.
    Pass true as argument if you want deleted instance types returned also."""
    return db.instance_type_get_all(context.get_admin_context(), inactive)


def get_all_flavors():
    """retrieves non-deleted flavors. alias for instance_types.get_all_types().
    Pass true as argument if you want deleted instance types returned also."""
    return get_all_types(context.get_admin_context())


def get_instance_type(name):
    """Retrieves single instance type by name"""
    if name is None:
        return FLAGS.default_instance_type
    try:
        ctxt = context.get_admin_context()
        inst_type = db.instance_type_get_by_name(ctxt, name)
        return inst_type
    except exception.DBError:
        raise exception.ApiError(_("Unknown instance type: %s" % name))


def get_by_type(instance_type):
    """Build instance data structure and save it to the data store."""
    """retrieve instance type name"""
    if instance_type is None:
        return FLAGS.default_instance_type
    if instance_type not in INSTANCE_TYPES:
        raise exception.ApiError(_("Unknown instance type: %s") % \
                                 instance_type, "Invalid")
    return instance_type

    try:
        ctxt = context.get_admin_context()
        inst_type = db.instance_type_get_by_name(ctxt, instance_type)
        return inst_type['name']
    except exception.DBError, e:
        LOG.exception(_('DB error: %s' % e))
        raise exception.ApiError(_("Unknown instance type: %s" %\
                                   instance_type))


def get_by_flavor_id(flavor_id):
    for instance_type, details in INSTANCE_TYPES.iteritems():
        if details['flavorid'] == int(flavor_id):
            return instance_type
    return FLAGS.default_instance_type
    """retrieve instance type's name by flavor_id"""
    if flavor_id is None:
        return FLAGS.default_instance_type
    try:
        ctxt = context.get_admin_context()
        flavor = db.instance_type_get_by_flavor_id(ctxt, flavor_id)
        return flavor['name']
    except exception.DBError, e:
        LOG.exception(_('DB error: %s' % e))
        raise exception.ApiError(_("Unknown flavor: %s" % flavor_id))
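Taken together, the module functions above form an importable API. A hypothetical interactive session might look like the following; it assumes a configured nova environment and database, and the flavorid 6 is made up::

    from nova.compute import instance_types

    instance_types.create("m1.xxlarge", 32768, 16, 320, 6)
    print instance_types.get_instance_type("m1.xxlarge")["memory_mb"]   # 32768
    print instance_types.get_by_flavor_id(6)                            # m1.xxlarge
    instance_types.destroy("m1.xxlarge")    # soft delete (marks deleted=1)
    instance_types.purge("m1.xxlarge")      # removes the row entirely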
@@ -370,12 +370,19 @@ class ComputeManager(manager.Manager):
        context = context.elevated()
        instance_ref = self.db.instance_get(context, instance_id)
        LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
        self.db.instance_set_state(context,
                                   instance_id,
                                   power_state.NOSTATE,
                                   'rescuing')
        self.db.instance_set_state(
            context,
            instance_id,
            power_state.NOSTATE,
            'rescuing')
        self.network_manager.setup_compute_network(context, instance_id)
        self.driver.rescue(instance_ref)
        self.driver.rescue(
            instance_ref,
            lambda result: self._update_state_callback(
                self,
                context,
                instance_id,
                result))
        self._update_state(context, instance_id)

    @exception.wrap_exception
@@ -385,11 +392,18 @@ class ComputeManager(manager.Manager):
        context = context.elevated()
        instance_ref = self.db.instance_get(context, instance_id)
        LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
        self.db.instance_set_state(context,
                                   instance_id,
                                   power_state.NOSTATE,
                                   'unrescuing')
        self.driver.unrescue(instance_ref)
        self.db.instance_set_state(
            context,
            instance_id,
            power_state.NOSTATE,
            'unrescuing')
        self.driver.unrescue(
            instance_ref,
            lambda result: self._update_state_callback(
                self,
                context,
                instance_id,
                result))
        self._update_state(context, instance_id)

    @staticmethod
@@ -1007,6 +1007,41 @@ def console_get(context, console_id, instance_id=None):
    return IMPL.console_get(context, console_id, instance_id)


##################


def instance_type_create(context, values):
    """Create a new instance type"""
    return IMPL.instance_type_create(context, values)


def instance_type_get_all(context, inactive=0):
    """Get all instance types"""
    return IMPL.instance_type_get_all(context, inactive)


def instance_type_get_by_name(context, name):
    """Get instance type by name"""
    return IMPL.instance_type_get_by_name(context, name)


def instance_type_get_by_flavor_id(context, id):
    """Get instance type by flavor id"""
    return IMPL.instance_type_get_by_flavor_id(context, id)


def instance_type_destroy(context, name):
    """Delete an instance type"""
    return IMPL.instance_type_destroy(context, name)


def instance_type_purge(context, name):
    """Purges (removes) an instance type from DB
    Use instance_type_destroy for most cases
    """
    return IMPL.instance_type_purge(context, name)


####################

@@ -2073,6 +2073,98 @@ def console_get(context, console_id, instance_id=None):
    return result


##################


@require_admin_context
def instance_type_create(_context, values):
    try:
        instance_type_ref = models.InstanceTypes()
        instance_type_ref.update(values)
        instance_type_ref.save()
    except:
        raise exception.DBError
    return instance_type_ref


@require_context
def instance_type_get_all(context, inactive=0):
    """
    Returns a dict describing all instance_types with name as key.
    """
    session = get_session()
    if inactive:
        inst_types = session.query(models.InstanceTypes).\
                             order_by("name").\
                             all()
    else:
        inst_types = session.query(models.InstanceTypes).\
                             filter_by(deleted=inactive).\
                             order_by("name").\
                             all()
    if inst_types:
        inst_dict = {}
        for i in inst_types:
            inst_dict[i['name']] = dict(i)
        return inst_dict
    else:
        raise exception.NotFound


@require_context
def instance_type_get_by_name(context, name):
    """Returns a dict describing specific instance_type"""
    session = get_session()
    inst_type = session.query(models.InstanceTypes).\
                        filter_by(name=name).\
                        first()
    if not inst_type:
        raise exception.NotFound(_("No instance type with name %s") % name)
    else:
        return dict(inst_type)


@require_context
def instance_type_get_by_flavor_id(context, id):
    """Returns a dict describing specific flavor_id"""
    session = get_session()
    inst_type = session.query(models.InstanceTypes).\
                        filter_by(flavorid=int(id)).\
                        first()
    if not inst_type:
        raise exception.NotFound(_("No flavor with name %s") % id)
    else:
        return dict(inst_type)


@require_admin_context
def instance_type_destroy(context, name):
    """ Marks specific instance_type as deleted"""
    session = get_session()
    instance_type_ref = session.query(models.InstanceTypes).\
                                filter_by(name=name)
    records = instance_type_ref.update(dict(deleted=1))
    if records == 0:
        raise exception.NotFound
    else:
        return instance_type_ref


@require_admin_context
def instance_type_purge(context, name):
    """ Removes specific instance_type from DB
    Usually instance_type_destroy should be used
    """
    session = get_session()
    instance_type_ref = session.query(models.InstanceTypes).\
                                filter_by(name=name)
    records = instance_type_ref.delete()
    if records == 0:
        raise exception.NotFound
    else:
        return instance_type_ref


####################

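For reference, the return shapes the callers above rely on look roughly like this (the field values are illustrative only); nova-manage's list() uses the dict-of-dicts vs. single-dict difference to pick its print path::

    all_types = {'m1.tiny': {'name': 'm1.tiny', 'memory_mb': 512, 'vcpus': 1,
                             'local_gb': 0, 'flavorid': 1, 'deleted': 0},
                 'm1.small': {'name': 'm1.small', 'memory_mb': 2048, 'vcpus': 1,
                              'local_gb': 20, 'flavorid': 2, 'deleted': 0}}
    one_type = all_types['m1.tiny']

    print isinstance(all_types.values()[0], dict)   # True  -> print every type
    print isinstance(one_type.values()[0], dict)    # False -> print just one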
@@ -0,0 +1,90 @@
# Copyright 2011 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import *
from migrate import *

from nova import log as logging


meta = MetaData()


# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
#
fixed_ips = Table(
    "fixed_ips",
    meta,
    Column(
        "id",
        Integer(),
        primary_key=True,
        nullable=False))

#
# New Tables
#
# None

#
# Tables to alter
#
# None

#
# Columns to add to existing tables
#

fixed_ips_addressV6 = Column(
    "addressV6",
    String(
        length=255,
        convert_unicode=False,
        assert_unicode=None,
        unicode_error=None,
        _warn_on_bytestring=False))


fixed_ips_netmaskV6 = Column(
    "netmaskV6",
    String(
        length=3,
        convert_unicode=False,
        assert_unicode=None,
        unicode_error=None,
        _warn_on_bytestring=False))


fixed_ips_gatewayV6 = Column(
    "gatewayV6",
    String(
        length=255,
        convert_unicode=False,
        assert_unicode=None,
        unicode_error=None,
        _warn_on_bytestring=False))


def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta.bind = migrate_engine

    # Add columns to existing tables
    fixed_ips.create_column(fixed_ips_addressV6)
    fixed_ips.create_column(fixed_ips_netmaskV6)
    fixed_ips.create_column(fixed_ips_gatewayV6)
@ -0,0 +1,87 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Ken Pepple
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import *
from migrate import *

from nova import api
from nova import db
from nova import log as logging

import datetime

meta = MetaData()


#
# New Tables
#
instance_types = Table('instance_types', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('name',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               unique=True),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('memory_mb', Integer(), nullable=False),
        Column('vcpus', Integer(), nullable=False),
        Column('local_gb', Integer(), nullable=False),
        Column('flavorid', Integer(), nullable=False, unique=True),
        Column('swap', Integer(), nullable=False, default=0),
        Column('rxtx_quota', Integer(), nullable=False, default=0),
        Column('rxtx_cap', Integer(), nullable=False, default=0))


def upgrade(migrate_engine):
    # Upgrade operations go here
    # Don't create your own engine; bind migrate_engine
    # to your metadata
    meta.bind = migrate_engine
    try:
        instance_types.create()
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while creating instance_types table')
        raise

    # Here are the old static instance types
    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
    try:
        i = instance_types.insert()
        for name, values in INSTANCE_TYPES.iteritems():
            # FIXME(kpepple) should we be seeding created_at / updated_at ?
            # now = datetime.datetime.utcnow()
            i.execute({'name': name, 'memory_mb': values["memory_mb"],
                       'vcpus': values["vcpus"], 'deleted': 0,
                       'local_gb': values["local_gb"],
                       'flavorid': values["flavorid"]})
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while seeding instance_types table')
        raise


def downgrade(migrate_engine):
    # Operations to reverse the above upgrade go here.
    for table in (instance_types, ):
        table.drop()
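A minimal sketch of applying these versioned scripts, assuming nova's nova.db.migration module exposes a db_sync() helper that drives sqlalchemy-migrate:

# Sketch only: upgrade the deployment's schema to the latest version.
from nova.db import migration

migration.db_sync()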
@ -126,11 +126,16 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
    """Represents a guest vm."""
    __tablename__ = 'instances'
    onset_files = []

    id = Column(Integer, primary_key=True, autoincrement=True)

    @property
    def name(self):
        return FLAGS.instance_name_template % self.id
        base_name = FLAGS.instance_name_template % self.id
        if getattr(self, '_rescue', False):
            base_name += "-rescue"
        return base_name

    admin_pass = Column(String(255))
    user_id = Column(String(255))
@ -210,6 +215,20 @@ class InstanceActions(BASE, NovaBase):
    error = Column(Text)


class InstanceTypes(BASE, NovaBase):
    """Represents a possible instance type (flavor) of VM offered."""
    __tablename__ = "instance_types"
    id = Column(Integer, primary_key=True)
    name = Column(String(255), unique=True)
    memory_mb = Column(Integer)
    vcpus = Column(Integer)
    local_gb = Column(Integer)
    flavorid = Column(Integer, unique=True)
    swap = Column(Integer, nullable=False, default=0)
    rxtx_quota = Column(Integer, nullable=False, default=0)
    rxtx_cap = Column(Integer, nullable=False, default=0)


class Volume(BASE, NovaBase):
    """Represents a block storage device that can be attached to a vm."""
    __tablename__ = 'volumes'
@ -437,6 +456,9 @@ class FixedIp(BASE, NovaBase):
    allocated = Column(Boolean, default=False)
    leased = Column(Boolean, default=False)
    reserved = Column(Boolean, default=False)
    addressV6 = Column(String(255))
    netmaskV6 = Column(String(3))
    gatewayV6 = Column(String(255))


class User(BASE, NovaBase):
@ -571,7 +593,7 @@ def register_models():
    connection is lost and needs to be reestablished.
    """
    from sqlalchemy import create_engine
    models = (Service, Instance, InstanceActions,
    models = (Service, Instance, InstanceActions, InstanceTypes,
              Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
              Network, SecurityGroup, SecurityGroupIngressRule,
              SecurityGroupInstanceAssociation, AuthToken, User,
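With InstanceTypes registered, flavors are reached through the db layer; a small sketch using the instance_type_get_* calls exercised later in this change (admin context assumed):

# Sketch only: read the seeded flavors back through the db API.
from nova import context
from nova import db

ctxt = context.get_admin_context()
m1_large = db.instance_type_get_by_name(ctxt, 'm1.large')
all_types = db.instance_type_get_all(ctxt)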
@ -531,6 +531,11 @@ class VlanManager(NetworkManager):
                                   ' than 4094'))

        fixed_net = IPy.IP(cidr)
        if fixed_net.len() < num_networks * network_size:
            raise ValueError(_('The network range is not big enough to fit '
                  '%(num_networks)s. Network size is %(network_size)s' %
                  locals()))

        fixed_net_v6 = IPy.IP(cidr_v6)
        network_size_v6 = 1 << 64
        significant_bits_v6 = 64
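As a quick illustration of the size check above, IPy.IP(...).len() is the number of addresses in the block (the figures below are invented for the example):

import IPy

fixed_net = IPy.IP('10.0.0.0/16')              # 65536 addresses
num_networks, network_size = 100, 256
assert fixed_net.len() >= num_networks * network_size   # 65536 >= 25600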
@ -27,7 +27,7 @@ from nova import utils
from nova.virt.vmwareapi_conn import VMWareAPISession
from nova.virt.vmwareapi.network_utils import NetworkHelper

LOG = logging.getLogger("nova.vmwareapi_net")
LOG = logging.getLogger("nova.network.vmwareapi_net")

FLAGS = flags.FLAGS
flags.DEFINE_string('vlan_interface', 'vmnic0',
@ -35,34 +35,6 @@ flags.DEFINE_string('vlan_interface', 'vmnic0',
                    'vlan networking')


def metadata_forward():
    pass


def init_host():
    pass


def bind_floating_ip(floating_ip, check_exit_code=True):
    pass


def unbind_floating_ip(floating_ip):
    pass


def ensure_vlan_forward(public_ip, port, private_ip):
    pass


def ensure_floating_forward(floating_ip, fixed_ip):
    pass


def remove_floating_forward(floating_ip, fixed_ip):
    pass


def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
    """Create a vlan and bridge unless they already exist"""
    #open vmwareapi session
@ -77,48 +49,49 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
    session = VMWareAPISession(host_ip, host_username, host_password,
                               FLAGS.vmwareapi_api_retry_count)
    vlan_interface = FLAGS.vlan_interface
    #check whether bridge already exists
    #retrieve network whose name_label is "bridge"
    #Check if the vlan_interface physical network adapter exists on the host
    if not NetworkHelper.check_if_vlan_interface_exists(session,
                                                        vlan_interface):
        raise Exception(_("There is no physical network adapter with the name"
                          " %s on the ESX host") % vlan_interface)
    #check whether bridge already exists and retrieve the ref of the
    #network whose name_label is "bridge"
    network_ref = NetworkHelper.get_network_with_the_name(session, bridge)

    #Get the vSwitch associated with the Physical Adapter
    vswitch_associated = NetworkHelper.get_vswitch_for_vlan_interface(
                                        session, vlan_interface)
    if vswitch_associated is None:
        raise Exception(_("There is no virtual switch associated with "
                          "the physical network adapter with name %s") %
                        vlan_interface)
    if network_ref == None:
        #Create a port group on the vSwitch associated with the vlan_interface
        #corresponding physical network adapter on the ESX host
        vswitches = NetworkHelper.get_vswitches_for_vlan_interface(session,
                                                            vlan_interface)
        if len(vswitches) == 0:
            raise Exception(_("There is no virtual switch connected "
                              "to the physical network adapter with name %s") %
                            vlan_interface)
        #Assuming physical network interface is associated with only one
        #virtual switch
        NetworkHelper.create_port_group(session, bridge, vswitches[0],
                                        vlan_num)
        NetworkHelper.create_port_group(session, bridge, vswitch_associated,
                                        vlan_num)
    else:
        #check VLAN tag is appropriate
        is_vlan_proper, ret_vlan_id = NetworkHelper.check_if_vlan_id_is_proper(
                                        session, bridge, vlan_num)
        if not is_vlan_proper:
            raise Exception(_("VLAN tag not appropriate for the port group "
                              "%(bridge)s. Expected VLAN tag is %(vlan_num)s, "
                              "but the one associated with the port group is"
                              " %(ret_vlan_id)s") % locals())
        #Get the vlan id and vswitch corresponding to the port group
        pg_vlanid, pg_vswitch = \
            NetworkHelper.get_vlanid_and_vswicth_for_portgroup(session, bridge)

        #Check if the vswitch associated is proper
        if pg_vswitch != vswitch_associated:
            raise Exception(_("vSwitch which contains the port group "
                              "%(bridge)s is not associated with the desired "
                              "physical adapter. Expected vSwitch is "
                              "%(vswitch_associated)s, but the one associated"
                              " is %(pg_vswitch)s") %\
                            {"bridge": bridge,
                             "vswitch_associated": vswitch_associated,
                             "pg_vswitch": pg_vswitch})


def ensure_vlan(vlan_num):
    pass


def ensure_bridge(bridge, interface, net_attrs=None):
    pass


def get_dhcp_hosts(context, network_id):
    pass


def update_dhcp(context, network_id):
    pass


def update_ra(context, network_id):
    pass
        #Check if the vlan id is proper for the port group
        if pg_vlanid != vlan_num:
            raise Exception(_("VLAN tag is not appropriate for the port "
                              "group %(bridge)s. Expected VLAN tag is "
                              "%(vlan_num)s, but the one associated with the "
                              "port group is %(pg_vlanid)s") %\
                            {"bridge": bridge,
                             "vlan_num": vlan_num,
                             "pg_vlanid": pg_vlanid})
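Read top to bottom, the reworked ensure_vlan_bridge flow is: verify the physical adapter, resolve its vSwitch, then either create the port group or validate the existing one. A compressed control-flow sketch using only the NetworkHelper calls shown above (error text elided; not the committed code):

def ensure_vlan_bridge_sketch(session, vlan_num, bridge, vlan_interface):
    # Sketch of the new control flow, not a verbatim copy.
    if not NetworkHelper.check_if_vlan_interface_exists(session,
                                                        vlan_interface):
        raise Exception("no such physical adapter")
    vswitch = NetworkHelper.get_vswitch_for_vlan_interface(session,
                                                           vlan_interface)
    network_ref = NetworkHelper.get_network_with_the_name(session, bridge)
    if network_ref is None:
        NetworkHelper.create_port_group(session, bridge, vswitch, vlan_num)
    else:
        pg_vlanid, pg_vswitch = \
            NetworkHelper.get_vlanid_and_vswicth_for_portgroup(session, bridge)
        if pg_vswitch != vswitch or pg_vlanid != vlan_num:
            raise Exception("existing port group does not match")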
@ -19,6 +19,7 @@
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""

import webob.exc

from webob import Request

@ -160,3 +161,23 @@ class LimiterTest(test.TestCase):
        self.assertEqual(limited(items, req, max_limit=2000), items[3:])
        req = Request.blank('/?offset=3000&limit=10')
        self.assertEqual(limited(items, req, max_limit=2000), [])

    def test_limiter_negative_limit(self):
        """
        Test a negative limit.
        """
        def _limit_large():
            limited(self.large, req, max_limit=2000)

        req = Request.blank('/?limit=-3000')
        self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)

    def test_limiter_negative_offset(self):
        """
        Test a negative offset.
        """
        def _limit_large():
            limited(self.large, req, max_limit=2000)

        req = Request.blank('/?offset=-30')
        self.assertRaises(webob.exc.HTTPBadRequest, _limit_large)
@ -20,6 +20,8 @@ import webob

from nova import test
import nova.api
from nova import context
from nova import db
from nova.api.openstack import flavors
from nova.tests.api.openstack import fakes

@ -33,6 +35,7 @@ class FlavorsTest(test.TestCase):
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)
        self.context = context.get_admin_context()

    def tearDown(self):
        self.stubs.UnsetAll()
@ -41,6 +44,9 @@
    def test_get_flavor_list(self):
        req = webob.Request.blank('/v1.0/flavors')
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)

    def test_get_flavor_by_id(self):
        pass
        req = webob.Request.blank('/v1.0/flavors/1')
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
@ -20,13 +20,22 @@
import time

from nova import db
from nova import test
from nova import utils
from nova.compute import instance_types


def stub_out_db_instance_api(stubs):
    """ Stubs out the db API for creating Instances """

    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium':
            dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge':
            dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}

    class FakeModel(object):
        """ Stubs out for model """
        def __init__(self, values):
@ -41,10 +50,16 @@ def stub_out_db_instance_api(stubs):
        else:
            raise NotImplementedError()

    def fake_instance_type_get_all(context, inactive=0):
        return INSTANCE_TYPES

    def fake_instance_type_get_by_name(context, name):
        return INSTANCE_TYPES[name]

    def fake_instance_create(values):
        """ Stubs out the db.instance_create method """

        type_data = instance_types.INSTANCE_TYPES[values['instance_type']]
        type_data = INSTANCE_TYPES[values['instance_type']]

        base_options = {
            'name': values['name'],
@ -73,3 +88,5 @@ def stub_out_db_instance_api(stubs):

    stubs.Set(db, 'instance_create', fake_instance_create)
    stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
    stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
    stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)

@ -267,7 +267,7 @@ class CloudTestCase(test.TestCase):
        self._create_key('test1')
        self._create_key('test2')
        result = self.cloud.describe_key_pairs(self.context)
        keys = result["keypairsSet"]
        keys = result["keySet"]
        self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
        self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
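The EC2 describe_key_pairs response key moves from "keypairsSet" to "keySet"; a hypothetical response shape that the assertions above rely on (fingerprints invented for illustration):

result = {"keySet": [{"keyName": "test1", "keyFingerprint": "aa:bb"},
                     {"keyName": "test2", "keyFingerprint": "cc:dd"}]}
names = [k["keyName"] for k in result["keySet"]]
assert "test1" in names and "test2" in names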
86
nova/tests/test_instance_types.py
Normal file
@ -0,0 +1,86 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Ken Pepple
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for instance types code
"""
import time

from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.compute import instance_types
from nova.db.sqlalchemy.session import get_session
from nova.db.sqlalchemy import models

FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.compute')


class InstanceTypeTestCase(test.TestCase):
    """Test cases for instance type code"""
    def setUp(self):
        super(InstanceTypeTestCase, self).setUp()
        session = get_session()
        max_flavorid = session.query(models.InstanceTypes).\
                                     order_by("flavorid desc").\
                                     first()
        self.flavorid = max_flavorid["flavorid"] + 1
        self.name = str(int(time.time()))

    def test_instance_type_create_then_delete(self):
        """Ensure instance types can be created"""
        starting_inst_list = instance_types.get_all_types()
        instance_types.create(self.name, 256, 1, 120, self.flavorid)
        new = instance_types.get_all_types()
        self.assertNotEqual(len(starting_inst_list),
                            len(new),
                            'instance type was not created')
        instance_types.destroy(self.name)
        self.assertEqual(1,
                    instance_types.get_instance_type(self.name)["deleted"])
        self.assertEqual(starting_inst_list, instance_types.get_all_types())
        instance_types.purge(self.name)
        self.assertEqual(len(starting_inst_list),
                         len(instance_types.get_all_types()),
                         'instance type not purged')

    def test_get_all_instance_types(self):
        """Ensures that all instance types can be retrieved"""
        session = get_session()
        total_instance_types = session.query(models.InstanceTypes).\
                                             count()
        inst_types = instance_types.get_all_types()
        self.assertEqual(total_instance_types, len(inst_types))

    def test_invalid_create_args_should_fail(self):
        """Ensures that instance type creation fails with invalid args"""
        self.assertRaises(
                exception.InvalidInputException,
                instance_types.create, self.name, 0, 1, 120, self.flavorid)
        self.assertRaises(
                exception.InvalidInputException,
                instance_types.create, self.name, 256, -1, 120, self.flavorid)
        self.assertRaises(
                exception.InvalidInputException,
                instance_types.create, self.name, 256, 1, "aa", self.flavorid)

    def test_non_existant_inst_type_shouldnt_delete(self):
        """Ensures that destroying a nonexistent instance type fails"""
        self.assertRaises(exception.ApiError,
                          instance_types.destroy, "sfsfsdfdfs")
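Outside the test harness, the same lifecycle reads as follows (a sketch restricted to the functions exercised above; the name and flavorid are invented):

from nova.compute import instance_types

instance_types.create("m1.custom", 512, 1, 20, 42)    # name, MB, vcpus, GB, flavorid
flavor = instance_types.get_instance_type("m1.custom")
instance_types.destroy("m1.custom")                   # soft delete: deleted == 1
instance_types.purge("m1.custom")                     # remove the row entirely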
@ -74,19 +74,30 @@ class QuotaTestCase(test.TestCase):
        vol['size'] = size
        return db.volume_create(self.context, vol)['id']

    def _get_instance_type(self, name):
        instance_types = {
            'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
            'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
            'm1.medium':
                dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
            'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
            'm1.xlarge':
                dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
        return instance_types[name]

    def test_quota_overrides(self):
        """Make sure overriding a project's quotas works"""
        num_instances = quota.allowed_instances(self.context, 100,
            instance_types.INSTANCE_TYPES['m1.small'])
            self._get_instance_type('m1.small'))
        self.assertEqual(num_instances, 2)
        db.quota_create(self.context, {'project_id': self.project.id,
                                       'instances': 10})
        num_instances = quota.allowed_instances(self.context, 100,
            instance_types.INSTANCE_TYPES['m1.small'])
            self._get_instance_type('m1.small'))
        self.assertEqual(num_instances, 4)
        db.quota_update(self.context, self.project.id, {'cores': 100})
        num_instances = quota.allowed_instances(self.context, 100,
            instance_types.INSTANCE_TYPES['m1.small'])
            self._get_instance_type('m1.small'))
        self.assertEqual(num_instances, 10)

        # metadata_items
@ -26,7 +26,6 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova.tests.glance import stubs as glance_stubs
from nova.tests.vmwareapi import db_fakes
@ -74,7 +73,7 @@ class VMWareAPIVMTestCase(test.TestCase):
            'mac_address': 'aa:bb:cc:dd:ee:ff',
        }
        self.instance = db.instance_create(values)
        self.type_data = instance_types.INSTANCE_TYPES[values['instance_type']]
        self.type_data = db.instance_type_get_by_name(None, 'm1.large')
        self.conn.spawn(self.instance)
        self._check_vm_record()


@ -233,7 +233,7 @@ class XenAPIVMTestCase(test.TestCase):
        vm = vms[0]

        # Check that m1.large above turned into the right thing.
        instance_type = instance_types.INSTANCE_TYPES['m1.large']
        instance_type = db.instance_type_get_by_name(conn, 'm1.large')
        mem_kib = long(instance_type['memory_mb']) << 10
        mem_bytes = str(mem_kib << 10)
        vcpus = instance_type['vcpus']
@ -23,12 +23,20 @@ import time

from nova import db
from nova import utils
from nova.compute import instance_types


def stub_out_db_instance_api(stubs):
    """ Stubs out the db API for creating Instances """

    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium':
            dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge':
            dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}

    class FakeModel(object):
        """ Stubs out for model """

@ -47,7 +55,7 @@ def stub_out_db_instance_api(stubs):
    def fake_instance_create(values):
        """ Stubs out the db.instance_create method """

        type_data = instance_types.INSTANCE_TYPES[values['instance_type']]
        type_data = INSTANCE_TYPES[values['instance_type']]

        base_options = {
            'name': values['name'],
@ -86,8 +94,16 @@ def stub_out_db_instance_api(stubs):
        """ Stubs out the db.instance_get_fixed_address method """
        return '10.10.10.10'

    def fake_instance_type_get_all(context, inactive=0):
        return INSTANCE_TYPES

    def fake_instance_type_get_by_name(context, name):
        return INSTANCE_TYPES[name]

    stubs.Set(db, 'instance_create', fake_instance_create)
    stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
    stubs.Set(db, 'instance_action_create', fake_instance_action_create)
    stubs.Set(db, 'instance_get_fixed_address',
              fake_instance_get_fixed_address)
    stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
    stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
@ -27,7 +27,7 @@ def stubout_instance_snapshot(stubs):
    def fake_fetch_image(cls, session, instance_id, image, user, project,
                         type):
    # Stubout wait_for_task
    def fake_wait_for_task(self, id, task):
    def fake_wait_for_task(self, task, id):
        class FakeEvent:

            def send(self, value):
@ -55,6 +55,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
#from nova import test
from nova import utils
#from nova.api import context
from nova.auth import manager
@ -362,7 +363,7 @@ class LibvirtConnection(object):
        raise exception.APIError("resume not supported for libvirt")

    @exception.wrap_exception
    def rescue(self, instance):
    def rescue(self, instance, callback=None):
        self.destroy(instance, False)

        xml = self.to_xml(instance, rescue=True)
@ -392,7 +393,7 @@ class LibvirtConnection(object):
        return timer.start(interval=0.5, now=True)

    @exception.wrap_exception
    def unrescue(self, instance):
    def unrescue(self, instance, callback=None):
        # NOTE(vish): Because reboot destroys and recreates an instance using
        # the normal xml file, we can just call reboot here
        self.reboot(instance)
@ -606,7 +607,7 @@ class LibvirtConnection(object):
                              user=user,
                              project=project,
                              size=size)
        type_data = instance_types.INSTANCE_TYPES[inst['instance_type']]
        type_data = instance_types.get_instance_type(inst['instance_type'])

        if type_data['local_gb']:
            self._cache_image(fn=self._create_local,
@ -667,7 +668,8 @@ class LibvirtConnection(object):
                                          instance['id'])
        # FIXME(vish): stick this in db
        instance_type = instance['instance_type']
        instance_type = instance_types.INSTANCE_TYPES[instance_type]
        # instance_type = test.INSTANCE_TYPES[instance_type]
        instance_type = instance_types.get_instance_type(instance_type)
        ip_address = db.instance_get_fixed_address(context.get_admin_context(),
                                                   instance['id'])
        # Assume that the gateway also acts as the dhcp server.
@ -28,7 +28,8 @@ from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi.vim import SessionFaultyException

_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
            'Network', 'HostSystem', 'Task', 'session', 'files']
            'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
            'files']

_FAKE_FILE_SIZE = 1024

@ -54,6 +55,7 @@ def reset():
    else:
        _db_content[c] = {}
    create_network()
    create_host_network_system()
    create_host()
    create_datacenter()
    create_datastore()
@ -242,13 +244,33 @@ class Datastore(ManagedObject):
        self.set("summary.name", "fake-ds")


class HostNetworkSystem(ManagedObject):
    """ HostNetworkSystem class """

    def __init__(self):
        ManagedObject.__init__(self, "HostNetworkSystem")
        self.set("name", "networkSystem")

        pnic_do = DataObject()
        pnic_do.device = "vmnic0"

        net_info_pnic = DataObject()
        net_info_pnic.PhysicalNic = [pnic_do]

        self.set("networkInfo.pnic", net_info_pnic)


class HostSystem(ManagedObject):
    """ Host System class """

    def __init__(self):
        ManagedObject.__init__(self, "HostSystem")
        self.set("name", "HostSystem")
        self.set("configManager.networkSystem", "NetworkSystem")
        self.set("name", "ha-host")
        if _db_content.get("HostNetworkSystem", None) is None:
            create_host_network_system()
        host_net_key = _db_content["HostNetworkSystem"].keys()[0]
        host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
        self.set("configManager.networkSystem", host_net_sys)

        vswitch_do = DataObject()
        vswitch_do.pnic = ["vmnic0"]
@ -324,6 +346,11 @@ class Task(ManagedObject):
        self.set("info", info)


def create_host_network_system():
    host_net_system = HostNetworkSystem()
    _create_object("HostNetworkSystem", host_net_system)


def create_host():
    host_system = HostSystem()
    _create_object('HostSystem', host_system)
@ -48,8 +48,8 @@ class NetworkHelper:
            return None

    @classmethod
    def get_vswitches_for_vlan_interface(cls, session, vlan_interface):
        """ Gets the list of vswitches associated with the physical
    def get_vswitch_for_vlan_interface(cls, session, vlan_interface):
        """ Gets the vswitch associated with the physical
        network adapter with the name supplied"""
        #Get the list of vSwitches on the Host System
        host_mor = session._call_method(vim_util, "get_objects",
@ -57,21 +57,31 @@ class NetworkHelper:
        vswitches = session._call_method(vim_util,
                        "get_dynamic_property", host_mor,
                        "HostSystem", "config.network.vswitch").HostVirtualSwitch
        vswicthes_conn_to_physical_nic = []
        #For each vSwitch check if it is associated with the network adapter
        #Get the vSwitch associated with the network adapter
        for elem in vswitches:
            try:
                for nic_elem in elem.pnic:
                    if str(nic_elem).split('-')[-1].find(vlan_interface) != -1:
                        vswicthes_conn_to_physical_nic.append(elem.name)
                        return elem.name
            except Exception:
                pass
        return vswicthes_conn_to_physical_nic

    @classmethod
    def check_if_vlan_id_is_proper(cls, session, pg_name, vlan_id):
        """ Check if the vlan id associated with the port group matches the
        vlan tag supplied """
    def check_if_vlan_interface_exists(cls, session, vlan_interface):
        """ Checks if the vlan_interface exists on the esx host """
        host_net_system_mor = session._call_method(vim_util, "get_objects",
            "HostSystem", ["configManager.networkSystem"])[0].propSet[0].val
        physical_nics = session._call_method(vim_util,
                            "get_dynamic_property", host_net_system_mor,
                            "HostNetworkSystem", "networkInfo.pnic").PhysicalNic
        for pnic in physical_nics:
            if vlan_interface == pnic.device:
                return True
        return False

    @classmethod
    def get_vlanid_and_vswicth_for_portgroup(cls, session, pg_name):
        """ Get the vlan id and vswitch associated with the port group """
        host_mor = session._call_method(vim_util, "get_objects",
                                        "HostSystem")[0].obj
        port_grps_on_host = session._call_method(vim_util,
@ -79,10 +89,8 @@ class NetworkHelper:
            "HostSystem", "config.network.portgroup").HostPortGroup
        for p_gp in port_grps_on_host:
            if p_gp.spec.name == pg_name:
                if p_gp.spec.vlanId == vlan_id:
                    return True, vlan_id
                else:
                    return False, p_gp.spec.vlanId
                p_grp_vswitch_name = p_gp.vswitch.split("-")[-1]
                return p_gp.spec.vlanId, p_grp_vswitch_name

    @classmethod
    def create_port_group(cls, session, pg_name, vswitch_name, vlan_id=0):
@ -20,111 +20,74 @@ The VMware API utility module
"""


def build_recursive_traversal_spec(client_factory):
    """Builds the Traversal Spec"""
    #Traversal through "hostFolder" branch
    visit_folders_select_spec = client_factory.create('ns0:SelectionSpec')
    visit_folders_select_spec.name = "visitFolders"
    dc_to_hf = client_factory.create('ns0:TraversalSpec')
    dc_to_hf.name = "dc_to_hf"
    dc_to_hf.type = "Datacenter"
    dc_to_hf.path = "hostFolder"
    dc_to_hf.skip = False
    dc_to_hf.selectSet = [visit_folders_select_spec]
def build_selcetion_spec(client_factory, name):
    """ Builds the selection spec """
    sel_spec = client_factory.create('ns0:SelectionSpec')
    sel_spec.name = name
    return sel_spec

    #Traversal through "vmFolder" branch
    visit_folders_select_spec = client_factory.create('ns0:SelectionSpec')
    visit_folders_select_spec.name = "visitFolders"
    dc_to_vmf = client_factory.create('ns0:TraversalSpec')
    dc_to_vmf.name = "dc_to_vmf"
    dc_to_vmf.type = "Datacenter"
    dc_to_vmf.path = "vmFolder"
    dc_to_vmf.skip = False
    dc_to_vmf.selectSet = [visit_folders_select_spec]

    #Traversal to the DataStore from the DataCenter
    visit_folders_select_spec = \
        client_factory.create('ns0:SelectionSpec')
    visit_folders_select_spec.name = "traverseChild"
    dc_to_ds = client_factory.create('ns0:TraversalSpec')
    dc_to_ds.name = "dc_to_ds"
    dc_to_ds.type = "Datacenter"
    dc_to_ds.path = "datastore"
    dc_to_ds.skip = False
    dc_to_ds.selectSet = [visit_folders_select_spec]

    #Traversal through "vm" branch
    visit_folders_select_spec = \
        client_factory.create('ns0:SelectionSpec')
    visit_folders_select_spec.name = "visitFolders"
    h_to_vm = client_factory.create('ns0:TraversalSpec')
    h_to_vm.name = "h_to_vm"
    h_to_vm.type = "HostSystem"
    h_to_vm.path = "vm"
    h_to_vm.skip = False
    h_to_vm.selectSet = [visit_folders_select_spec]

    #Traversal through "host" branch
    cr_to_h = client_factory.create('ns0:TraversalSpec')
    cr_to_h.name = "cr_to_h"
    cr_to_h.type = "ComputeResource"
    cr_to_h.path = "host"
    cr_to_h.skip = False
    cr_to_h.selectSet = []

    cr_to_ds = client_factory.create('ns0:TraversalSpec')
    cr_to_ds.name = "cr_to_ds"
    cr_to_ds.type = "ComputeResource"
    cr_to_ds.path = "datastore"
    cr_to_ds.skip = False

    #Traversal through "resourcePool" branch
    rp_to_rp_select_spec = client_factory.create('ns0:SelectionSpec')
    rp_to_rp_select_spec.name = "rp_to_rp"
    rp_to_vm_select_spec = client_factory.create('ns0:SelectionSpec')
    rp_to_vm_select_spec.name = "rp_to_vm"
    cr_to_rp = client_factory.create('ns0:TraversalSpec')
    cr_to_rp.name = "cr_to_rp"
    cr_to_rp.type = "ComputeResource"
    cr_to_rp.path = "resourcePool"
    cr_to_rp.skip = False
    cr_to_rp.selectSet = [rp_to_rp_select_spec, rp_to_vm_select_spec]

    #Traversal through all ResourcePools
    rp_to_rp_select_spec = client_factory.create('ns0:SelectionSpec')
    rp_to_rp_select_spec.name = "rp_to_rp"
    rp_to_vm_select_spec = client_factory.create('ns0:SelectionSpec')
    rp_to_vm_select_spec.name = "rp_to_vm"
    rp_to_rp = client_factory.create('ns0:TraversalSpec')
    rp_to_rp.name = "rp_to_rp"
    rp_to_rp.type = "ResourcePool"
    rp_to_rp.path = "resourcePool"
    rp_to_rp.skip = False
    rp_to_rp.selectSet = [rp_to_rp_select_spec, rp_to_vm_select_spec]

    #Traversal through ResourcePools vm folders
    rp_to_rp_select_spec = client_factory.create('ns0:SelectionSpec')
    rp_to_rp_select_spec.name = "rp_to_rp"
    rp_to_vm_select_spec = client_factory.create('ns0:SelectionSpec')
    rp_to_vm_select_spec.name = "rp_to_vm"
    rp_to_vm = client_factory.create('ns0:TraversalSpec')
    rp_to_vm.name = "rp_to_vm"
    rp_to_vm.type = "ResourcePool"
    rp_to_vm.path = "vm"
    rp_to_vm.skip = False
    rp_to_vm.selectSet = [rp_to_rp_select_spec, rp_to_vm_select_spec]

    #Include all Traversals and Recurse into them
    visit_folders_select_spec = \
        client_factory.create('ns0:SelectionSpec')
    visit_folders_select_spec.name = "visitFolders"
def build_traversal_spec(client_factory, name, type, path, skip, select_set):
    """ Builds the traversal spec object """
    traversal_spec = client_factory.create('ns0:TraversalSpec')
    traversal_spec.name = "visitFolders"
    traversal_spec.type = "Folder"
    traversal_spec.path = "childEntity"
    traversal_spec.skip = False
    traversal_spec.selectSet = [visit_folders_select_spec, dc_to_hf, dc_to_vmf,
                 cr_to_ds, cr_to_h, cr_to_rp, rp_to_rp, h_to_vm, rp_to_vm]
    traversal_spec.name = name
    traversal_spec.type = type
    traversal_spec.path = path
    traversal_spec.skip = skip
    traversal_spec.selectSet = select_set
    return traversal_spec


def build_recursive_traversal_spec(client_factory):
    """ Builds the Recursive Traversal Spec to traverse the managed
    object hierarchy """
    visit_folders_select_spec = build_selcetion_spec(client_factory,
                                                     "visitFolders")
    #For getting to hostFolder from datacenter
    dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter",
                                    "hostFolder", False,
                                    [visit_folders_select_spec])
    #For getting to vmFolder from datacenter
    dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter",
                                     "vmFolder", False,
                                     [visit_folders_select_spec])
    #For getting Host System to virtual machine
    h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem",
                                   "vm", False,
                                   [visit_folders_select_spec])

    #For getting to Host System from Compute Resource
    cr_to_h = build_traversal_spec(client_factory, "cr_to_h",
                                   "ComputeResource", "host", False, [])

    #For getting to datastore from Compute Resource
    cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds",
                                    "ComputeResource", "datastore", False, [])

    rp_to_rp_select_spec = build_selcetion_spec(client_factory, "rp_to_rp")
    rp_to_vm_select_spec = build_selcetion_spec(client_factory, "rp_to_vm")
    #For getting to resource pool from Compute Resource
    cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp",
                                "ComputeResource", "resourcePool", False,
                                [rp_to_rp_select_spec, rp_to_vm_select_spec])

    #For getting to child res pool from the parent res pool
    rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool",
                                "resourcePool", False,
                                [rp_to_rp_select_spec, rp_to_vm_select_spec])

    #For getting to Virtual Machine from the Resource Pool
    rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool",
                                "vm", False,
                                [rp_to_rp_select_spec, rp_to_vm_select_spec])

    #Get the assorted traversal spec which takes care of the objects to
    #be searched for from the root folder
    traversal_spec = build_traversal_spec(client_factory, "visitFolders",
                                    "Folder", "childEntity", False,
                                    [visit_folders_select_spec, dc_to_hf,
                                    dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,
                                    rp_to_rp, h_to_vm, rp_to_vm])
    return traversal_spec
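The refactor folds the repeated suds object boilerplate into two small builders, so an additional traversal link would now be a couple of lines; a hypothetical example (the "dc_to_net" link is invented, only the helpers above are assumed):

visit_folders = build_selcetion_spec(client_factory, "visitFolders")
dc_to_net = build_traversal_spec(client_factory, "dc_to_net", "Datacenter",
                                 "network", False, [visit_folders])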
@ -104,75 +104,99 @@ class VMWareVMOps(object):
                            "but that already exists on the host") % instance.name)

        client_factory = self._session._get_vim().client.factory
        service_content = self._session._get_vim().get_service_content()

        network = db.network_get_by_instance(context.get_admin_context(),
                                             instance['id'])

        net_name = network['bridge']
        network_ref = \
            NetworkHelper.get_network_with_the_name(self._session, net_name)
        if network_ref is None:
            raise Exception(_("Network with the name '%s' doesn't exist on"
                              " the ESX host") % net_name)
        #Get the Size of the flat vmdk file that is there on the storage
        #repository.
        image_size, image_properties = \
                vmware_images.get_vmdk_size_and_properties(instance.image_id,
                                                           instance)
        vmdk_file_size_in_kb = int(image_size) / 1024
        os_type = image_properties.get("vmware_ostype", "otherGuest")
        adapter_type = image_properties.get("vmware_adaptertype", "lsiLogic")

        # Get the datastore list and choose the first local storage
        data_stores = self._session._call_method(vim_util, "get_objects",
                    "Datastore", ["summary.type", "summary.name"])
        data_store_name = None
        for elem in data_stores:
            ds_name = None
            ds_type = None
            for prop in elem.propSet:
                if prop.name == "summary.type":
                    ds_type = prop.val
                elif prop.name == "summary.name":
                    ds_name = prop.val
            #Local storage identifier
            if ds_type == "VMFS":
                data_store_name = ds_name
                break
        def _check_if_network_bridge_exists():
            network_ref = \
                NetworkHelper.get_network_with_the_name(self._session,
                                                        net_name)
            if network_ref is None:
                raise Exception(_("Network with the name '%s' doesn't exist on"
                                  " the ESX host") % net_name)

        if data_store_name is None:
            msg = _("Couldn't get a local Datastore reference")
            LOG.exception(msg)
            raise Exception(msg)
        _check_if_network_bridge_exists()

        def _get_datastore_ref():
            # Get the datastore list and choose the first local storage
            data_stores = self._session._call_method(vim_util, "get_objects",
                        "Datastore", ["summary.type", "summary.name"])
            for elem in data_stores:
                ds_name = None
                ds_type = None
                for prop in elem.propSet:
                    if prop.name == "summary.type":
                        ds_type = prop.val
                    elif prop.name == "summary.name":
                        ds_name = prop.val
                #Local storage identifier
                if ds_type == "VMFS":
                    data_store_name = ds_name
                    return data_store_name

        if data_store_name is None:
            msg = _("Couldn't get a local Datastore reference")
            LOG.exception(msg)
            raise Exception(msg)

        data_store_name = _get_datastore_ref()

        def _get_image_properties():
            #Get the Size of the flat vmdk file that is there on the storage
            #repository.
            image_size, image_properties = \
                    vmware_images.get_vmdk_size_and_properties(
                            instance.image_id, instance)
            vmdk_file_size_in_kb = int(image_size) / 1024
            os_type = image_properties.get("vmware_ostype", "otherGuest")
            adapter_type = image_properties.get("vmware_adaptertype",
                                                "lsiLogic")
            return vmdk_file_size_in_kb, os_type, adapter_type

        vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties()

        def _get_vmfolder_and_res_pool_mors():
            #Get the Vm folder ref from the datacenter
            dc_objs = self._session._call_method(vim_util, "get_objects",
                                                 "Datacenter", ["vmFolder"])
            #There is only one default datacenter in a standalone ESX host
            vm_folder_mor = dc_objs[0].propSet[0].val

            #Get the resource pool. Taking the first resource pool coming our
            #way. Assuming that is the default resource pool.
            res_pool_mor = self._session._call_method(vim_util, "get_objects",
                                                      "ResourcePool")[0].obj
            return vm_folder_mor, res_pool_mor

        vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()

        #Get the create vm config spec
        config_spec = vm_util.get_vm_create_spec(client_factory, instance,
                                        data_store_name, net_name, os_type)

        #Get the Vm folder ref from the datacenter
        dc_objs = self._session._call_method(vim_util, "get_objects",
                                             "Datacenter", ["vmFolder"])
        #There is only one default datacenter in a standalone ESX host
        vm_folder_ref = dc_objs[0].propSet[0].val
        def _execute_create_vm():
            LOG.debug(_("Creating VM with the name %s on the ESX host") %
                      instance.name)
            #Create the VM on the ESX host
            vm_create_task = self._session._call_method(
                                    self._session._get_vim(),
                                    "CreateVM_Task", vm_folder_mor,
                                    config=config_spec, pool=res_pool_mor)
            self._session._wait_for_task(instance.id, vm_create_task)

        #Get the resource pool. Taking the first resource pool coming our way.
        #Assuming that is the default resource pool.
        res_pool_mor = self._session._call_method(vim_util, "get_objects",
                                                  "ResourcePool")[0].obj
            LOG.debug(_("Created VM with the name %s on the ESX host") %
                      instance.name)

        LOG.debug(_("Creating VM with the name %s on the ESX host") %
                  instance.name)
        #Create the VM on the ESX host
        vm_create_task = self._session._call_method(self._session._get_vim(),
                            "CreateVM_Task", vm_folder_ref,
                            config=config_spec, pool=res_pool_mor)
        self._session._wait_for_task(instance.id, vm_create_task)

        LOG.debug(_("Created VM with the name %s on the ESX host") %
                  instance.name)
        _execute_create_vm()

        # Set the machine id for the VM for setting the IP
        self._set_machine_id(client_factory, instance)

        #Naming the VM files in correspondence with the VM instance name

        # The flat vmdk file name
        flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name,
                                                       instance.name)
@ -183,79 +207,111 @@ class VMWareVMOps(object):
        uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
                                                          uploaded_vmdk_name)

        #Create a Virtual Disk of the size of the flat vmdk file. This is done
        #just created to generate the meta-data file whose specifics
        #depend on the size of the disk, thin/thick provisioning and the
        #storage adapter type.
        #Here we assume thick provisioning and lsiLogic for the adapter type
        LOG.debug(_("Creating Virtual Disk of size %(vmdk_file_size_in_kb)s "
                    "KB and adapter type %(adapter_type)s on "
                    "the ESX host local store %(data_store_name)s") % locals())
        vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory,
                                    vmdk_file_size_in_kb, adapter_type)
        vmdk_create_task = self._session._call_method(self._session._get_vim(),
            "CreateVirtualDisk_Task",
            self._session._get_vim().get_service_content().virtualDiskManager,
            name=uploaded_vmdk_path,
            datacenter=self._get_datacenter_name_and_ref()[0],
            spec=vmdk_create_spec)
        self._session._wait_for_task(instance.id, vmdk_create_task)
        LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s "
                    "KB on the ESX host local store "
                    "%(data_store_name)s") % locals())
        def _create_virtual_disk():
            #Create a Virtual Disk of the size of the flat vmdk file. This is
            #done just to generate the meta-data file whose specifics
            #depend on the size of the disk, thin/thick provisioning and the
            #storage adapter type.
            #Here we assume thick provisioning and lsiLogic for the adapter
            #type
            LOG.debug(_("Creating Virtual Disk of size "
                        "%(vmdk_file_size_in_kb)s KB and adapter type "
                        "%(adapter_type)s on the ESX host local store"
                        " %(data_store_name)s") %
                        {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
                         "adapter_type": adapter_type,
                         "data_store_name": data_store_name})
            vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory,
                                    vmdk_file_size_in_kb, adapter_type)
            vmdk_create_task = self._session._call_method(
                self._session._get_vim(),
                "CreateVirtualDisk_Task",
                service_content.virtualDiskManager,
                name=uploaded_vmdk_path,
                datacenter=self._get_datacenter_name_and_ref()[0],
                spec=vmdk_create_spec)
            self._session._wait_for_task(instance.id, vmdk_create_task)
            LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
                        " KB on the ESX host local store "
                        "%(data_store_name)s") %
                        {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
                         "data_store_name": data_store_name})

        LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s "
                    "on the ESX host local"
                    "store %(data_store_name)s") % locals())
        #Delete the -flat.vmdk file created. .vmdk file is retained.
        vmdk_delete_task = self._session._call_method(self._session._get_vim(),
                    "DeleteDatastoreFile_Task",
                    self._session._get_vim().get_service_content().fileManager,
                    name=flat_uploaded_vmdk_path)
        self._session._wait_for_task(instance.id, vmdk_delete_task)
        LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the "
                    "ESX host local store %(data_store_name)s") % locals())
        _create_virtual_disk()

        def _delete_disk_file():
            LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s "
                        "on the ESX host local"
                        "store %(data_store_name)s") %
                        {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
                         "data_store_name": data_store_name})
            #Delete the -flat.vmdk file created. .vmdk file is retained.
            vmdk_delete_task = self._session._call_method(
                        self._session._get_vim(),
                        "DeleteDatastoreFile_Task",
                        service_content.fileManager,
                        name=flat_uploaded_vmdk_path)
            self._session._wait_for_task(instance.id, vmdk_delete_task)
            LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the "
                        "ESX host local store %(data_store_name)s") %
                        {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
                         "data_store_name": data_store_name})

        _delete_disk_file()

        LOG.debug(_("Downloading image file data %(image_id)s to the ESX "
                    "data store %(data_store_name)s") %
                    ({'image_id': instance.image_id,
                      'data_store_name': data_store_name}))
        cookies = self._session._get_vim().client.options.transport.cookiejar
        # Upload the -flat.vmdk file whose meta-data file we just created above
        vmware_images.fetch_image(
            instance.image_id,
            instance,
            host=self._session._host_ip,
            data_center_name=self._get_datacenter_name_and_ref()[1],
            datastore_name=data_store_name,
            cookies=cookies,
            file_path=flat_uploaded_vmdk_name)
        LOG.debug(_("Downloaded image file data %(image_id)s to the ESX "
                    "data store %(data_store_name)s") %
                    ({'image_id': instance.image_id,
                      'data_store_name': data_store_name}))

        #Attach the vmdk uploaded to the VM. VM reconfigure is done to do so.
        vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
                                    client_factory,
                                    vmdk_file_size_in_kb, uploaded_vmdk_path,
                                    adapter_type)
        def _fetch_image_on_esx_datastore():
            LOG.debug(_("Downloading image file data %(image_id)s to the ESX "
                        "data store %(data_store_name)s") %
                        ({'image_id': instance.image_id,
                          'data_store_name': data_store_name}))
            #Upload the -flat.vmdk file whose meta-data file we just created
            #above
            vmware_images.fetch_image(
                instance.image_id,
                instance,
                host=self._session._host_ip,
                data_center_name=self._get_datacenter_name_and_ref()[1],
                datastore_name=data_store_name,
                cookies=cookies,
                file_path=flat_uploaded_vmdk_name)
            LOG.debug(_("Downloaded image file data %(image_id)s to the ESX "
                        "data store %(data_store_name)s") %
                        ({'image_id': instance.image_id,
                          'data_store_name': data_store_name}))
        _fetch_image_on_esx_datastore()

        vm_ref = self._get_vm_ref_from_the_name(instance.name)
        LOG.debug(_("Reconfiguring VM instance %s to attach the image "
                    "disk") % instance.name)
        reconfig_task = self._session._call_method(self._session._get_vim(),
                            "ReconfigVM_Task", vm_ref,
                            spec=vmdk_attach_config_spec)
        self._session._wait_for_task(instance.id, reconfig_task)
        LOG.debug(_("Reconfigured VM instance %s to attach the image "
                    "disk") % instance.name)

        LOG.debug(_("Powering on the VM instance %s") % instance.name)
        #Power On the VM
        power_on_task = self._session._call_method(self._session._get_vim(),
                            "PowerOnVM_Task", vm_ref)
        self._session._wait_for_task(instance.id, power_on_task)
        LOG.debug(_("Powered on the VM instance %s") % instance.name)
        def _attach_vmdk_to_the_vm():
            #Attach the vmdk uploaded to the VM. VM reconfigure is done
            #to do so.
            vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
                                    client_factory,
                                    vmdk_file_size_in_kb, uploaded_vmdk_path,
                                    adapter_type)
            LOG.debug(_("Reconfiguring VM instance %s to attach the image "
                        "disk") % instance.name)
            reconfig_task = self._session._call_method(
                            self._session._get_vim(),
                            "ReconfigVM_Task", vm_ref,
                            spec=vmdk_attach_config_spec)
            self._session._wait_for_task(instance.id, reconfig_task)
            LOG.debug(_("Reconfigured VM instance %s to attach the image "
                        "disk") % instance.name)

        _attach_vmdk_to_the_vm()

        def _power_on_vm():
            LOG.debug(_("Powering on the VM instance %s") % instance.name)
            #Power On the VM
            power_on_task = self._session._call_method(
                               self._session._get_vim(),
                               "PowerOnVM_Task", vm_ref)
            self._session._wait_for_task(instance.id, power_on_task)
            LOG.debug(_("Powered on the VM instance %s") % instance.name)
        _power_on_vm()

    def snapshot(self, instance, snapshot_name):
        """
@ -275,51 +331,65 @@ class VMWareVMOps(object):
        if vm_ref is None:
            raise Exception(_("instance - %s not present") % instance.name)

        #Get the vmdk file name that the VM is pointing to
        hardware_devices = self._session._call_method(vim_util,
                    "get_dynamic_property", vm_ref,
                    "VirtualMachine", "config.hardware.device")
        client_factory = self._session._get_vim().client.factory
        vmdk_file_path_before_snapshot, adapter_type = \
            vm_util.get_vmdk_file_path_and_adapter_type(client_factory,
                                                        hardware_devices)
        service_content = self._session._get_vim().get_service_content()

        os_type = self._session._call_method(vim_util,
                    "get_dynamic_property", vm_ref,
                    "VirtualMachine", "summary.config.guestId")
        #Create a snapshot of the VM
        LOG.debug(_("Creating Snapshot of the VM instance %s ") %
                  instance.name)
        snapshot_task = self._session._call_method(self._session._get_vim(),
                    "CreateSnapshot_Task", vm_ref,
                    name="%s-snapshot" % instance.name,
                    description="Taking Snapshot of the VM",
                    memory=True,
                    quiesce=True)
        self._session._wait_for_task(instance.id, snapshot_task)
        LOG.debug(_("Created Snapshot of the VM instance %s ") % instance.name)
        def _get_vm_and_vmdk_attribs():
            #Get the vmdk file name that the VM is pointing to
            hardware_devices = self._session._call_method(vim_util,
                        "get_dynamic_property", vm_ref,
                        "VirtualMachine", "config.hardware.device")
            vmdk_file_path_before_snapshot, adapter_type = \
                vm_util.get_vmdk_file_path_and_adapter_type(client_factory,
                                                            hardware_devices)
            datastore_name = vm_util.split_datastore_path(
                                     vmdk_file_path_before_snapshot)[0]
            os_type = self._session._call_method(vim_util,
                        "get_dynamic_property", vm_ref,
                        "VirtualMachine", "summary.config.guestId")
            return (vmdk_file_path_before_snapshot, adapter_type,
                    datastore_name, os_type)

        datastore_name = vm_util.split_datastore_path(
                                 vmdk_file_path_before_snapshot)[0]
        #Copy the contents of the VM that were there just before the snapshot
        #was taken
        ds_ref = vim_util.get_dynamic_property(self._session._get_vim(),
        vmdk_file_path_before_snapshot, adapter_type, datastore_name,\
            os_type = _get_vm_and_vmdk_attribs()

        def _create_vm_snapshot():
            #Create a snapshot of the VM
            LOG.debug(_("Creating Snapshot of the VM instance %s ") %
                      instance.name)
            snapshot_task = self._session._call_method(
                        self._session._get_vim(),
                        "CreateSnapshot_Task", vm_ref,
                        name="%s-snapshot" % instance.name,
                        description="Taking Snapshot of the VM",
                        memory=True,
                        quiesce=True)
            self._session._wait_for_task(instance.id, snapshot_task)
            LOG.debug(_("Created Snapshot of the VM instance %s ") %
                      instance.name)

        _create_vm_snapshot()

        def _check_if_tmp_folder_exists():
            #Copy the contents of the VM that were there just before the
            #snapshot was taken
            ds_ref = vim_util.get_dynamic_property(self._session._get_vim(),
                                       vm_ref,
                                       "VirtualMachine",
                                       "datastore").ManagedObjectReference[0]
            ds_browser = vim_util.get_dynamic_property(self._session._get_vim(),
            ds_browser = vim_util.get_dynamic_property(
                                       self._session._get_vim(),
                                       ds_ref,
                                       "Datastore",
                                       "browser")
            #Check if the vmware-tmp folder exists or not. If not, create one
            tmp_folder_path = vm_util.build_datastore_path(datastore_name,
                                                           "vmware-tmp")
            if not self._path_exists(ds_browser, tmp_folder_path):
                self._mkdir(vm_util.build_datastore_path(datastore_name,
                                                         "vmware-tmp"))
        #Check if the vmware-tmp folder exists or not. If not, create one
        tmp_folder_path = vm_util.build_datastore_path(datastore_name,
                                                       "vmware-tmp")
        if not self._path_exists(ds_browser, tmp_folder_path):
            self._mkdir(vm_util.build_datastore_path(datastore_name,
                                                     "vmware-tmp"))

        copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory,
                                                       adapter_type)
        _check_if_tmp_folder_exists()

        #Generate a random vmdk file name to which the coalesced vmdk content
        #will be copied to. A random name is chosen so that we don't have
@ -329,50 +399,64 @@ class VMWareVMOps(object):
"vmware-tmp/%s.vmdk" % random_name)
dc_ref = self._get_datacenter_name_and_ref()[0]

#Copy the contents of the disk ( or disks, if there were snapshots
#done earlier) to a temporary vmdk file.
LOG.debug(_("Copying disk data before snapshot of the VM instance %s")
% instance.name)
copy_disk_task = self._session._call_method(self._session._get_vim(),
"CopyVirtualDisk_Task",
self._session._get_vim().get_service_content().virtualDiskManager,
sourceName=vmdk_file_path_before_snapshot,
sourceDatacenter=dc_ref,
destName=dest_vmdk_file_location,
destDatacenter=dc_ref,
destSpec=copy_spec,
force=False)
self._session._wait_for_task(instance.id, copy_disk_task)
LOG.debug(_("Copied disk data before snapshot of the VM instance %s")
% instance.name)
def _copy_vmdk_content():
#Copy the contents of the disk ( or disks, if there were snapshots
#done earlier) to a temporary vmdk file.
copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory,
adapter_type)
LOG.debug(_("Copying disk data before snapshot of the VM "
" instance %s") % instance.name)
copy_disk_task = self._session._call_method(
self._session._get_vim(),
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=vmdk_file_path_before_snapshot,
sourceDatacenter=dc_ref,
destName=dest_vmdk_file_location,
destDatacenter=dc_ref,
destSpec=copy_spec,
force=False)
self._session._wait_for_task(instance.id, copy_disk_task)
LOG.debug(_("Copied disk data before snapshot of the VM "
"instance %s") % instance.name)

_copy_vmdk_content()

cookies = self._session._get_vim().client.options.transport.cookiejar
#Upload the contents of -flat.vmdk file which has the disk data.
LOG.debug(_("Uploading image %s") % snapshot_name)
vmware_images.upload_image(
snapshot_name,
instance,
os_type=os_type,
adapter_type=adapter_type,
image_version=1,
host=self._session._host_ip,
data_center_name=self._get_datacenter_name_and_ref()[1],
datastore_name=datastore_name,
cookies=cookies,
file_path="vmware-tmp/%s-flat.vmdk" % random_name)
LOG.debug(_("Uploaded image %s") % snapshot_name)

#Delete the temporary vmdk created above.
LOG.debug(_("Deleting temporary vmdk file %s")
% dest_vmdk_file_location)
remove_disk_task = self._session._call_method(self._session._get_vim(),
"DeleteVirtualDisk_Task",
self._session._get_vim().get_service_content().virtualDiskManager,
name=dest_vmdk_file_location,
datacenter=dc_ref)
self._session._wait_for_task(instance.id, remove_disk_task)
LOG.debug(_("Deleted temporary vmdk file %s")
% dest_vmdk_file_location)
def _upload_vmdk_to_image_repository():
#Upload the contents of -flat.vmdk file which has the disk data.
LOG.debug(_("Uploading image %s") % snapshot_name)
vmware_images.upload_image(
snapshot_name,
instance,
os_type=os_type,
adapter_type=adapter_type,
image_version=1,
host=self._session._host_ip,
data_center_name=self._get_datacenter_name_and_ref()[1],
datastore_name=datastore_name,
cookies=cookies,
file_path="vmware-tmp/%s-flat.vmdk" % random_name)
LOG.debug(_("Uploaded image %s") % snapshot_name)

_upload_vmdk_to_image_repository()

def _clean_temp_data():
#Delete the temporary vmdk created above.
LOG.debug(_("Deleting temporary vmdk file %s")
% dest_vmdk_file_location)
remove_disk_task = self._session._call_method(
self._session._get_vim(),
"DeleteVirtualDisk_Task",
service_content.virtualDiskManager,
name=dest_vmdk_file_location,
datacenter=dc_ref)
self._session._wait_for_task(instance.id, remove_disk_task)
LOG.debug(_("Deleted temporary vmdk file %s")
% dest_vmdk_file_location)

_clean_temp_data()

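The snapshot path above now runs as a chain of small nested steps. Purely for orientation, here is a minimal, self-contained sketch of that ordering; every helper is a hypothetical stub, since the real code drives the vSphere API through self._session and vm_util, which are not reproduced here.

# Minimal sketch of the refactored snapshot flow; each helper is a stand-in
# for the nested functions shown in the diff above.
def snapshot_flow(instance_name):
    def _get_vm_and_vmdk_attribs():
        # The real helper queries vSphere; hard-coded values keep this runnable.
        return ("[ds1] %s/%s.vmdk" % (instance_name, instance_name),
                "lsiLogic", "ds1", "otherGuest")

    def _create_vm_snapshot():
        print("creating snapshot of %s" % instance_name)

    def _check_if_tmp_folder_exists():
        print("ensuring a vmware-tmp folder exists on the datastore")

    def _copy_vmdk_content():
        print("copying coalesced disk data to a temporary vmdk")

    def _upload_vmdk_to_image_repository():
        print("uploading the -flat.vmdk contents to the image service")

    def _clean_temp_data():
        print("deleting the temporary vmdk")

    vmdk_path, adapter_type, datastore_name, os_type = \
        _get_vm_and_vmdk_attribs()
    _create_vm_snapshot()
    _check_if_tmp_folder_exists()
    _copy_vmdk_content()
    _upload_vmdk_to_image_repository()
    _clean_temp_data()
    return (vmdk_path, adapter_type, datastore_name, os_type)


snapshot_flow("instance-00000001")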
def reboot(self, instance):
""" Reboot a VM instance """

@ -401,7 +401,7 @@ class SessionBase(object):
field in _db_content[cls][ref]):
return _db_content[cls][ref][field]

LOG.debuug(_('Raising NotImplemented'))
LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)

@ -86,7 +86,8 @@ class VMHelper(HelperBase):
the pv_kernel flag indicates whether the guest is HVM or PV
"""

instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
instance_type = instance_types.\
get_instance_type(instance.instance_type)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
@ -144,7 +145,8 @@ class VMHelper(HelperBase):

@classmethod
def ensure_free_mem(cls, session, instance):
instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
instance_type = instance_types.get_instance_type(
instance.instance_type)
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
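Both hunks above swap direct reads of the static INSTANCE_TYPES dict for instance_types.get_instance_type(). A rough sketch of that lookup pattern, with a plain in-memory dict standing in for the database-backed helper:

# Hypothetical stand-in for instance_types.get_instance_type(); the real helper
# reads the instance_types table instead of this in-memory dict.
INSTANCE_TYPES = {
    'm1.small': {'memory_mb': 2048, 'vcpus': 1, 'local_gb': 20},
    'm1.large': {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 80},
}


def get_instance_type(name):
    try:
        return INSTANCE_TYPES[name]
    except KeyError:
        raise ValueError("unknown instance type: %s" % name)


instance_type = get_instance_type('m1.small')
# ensure_free_mem() above turns memory_mb into bytes before comparing to the host.
mem_bytes = instance_type['memory_mb'] * 1024 * 1024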
@ -205,19 +207,17 @@ class VMHelper(HelperBase):
"""Destroy VBD from host database"""
try:
task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
#FIXME(armando): find a solution to missing instance_id
#with Josh Kearney
session.wait_for_task(0, task)
session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)

@classmethod
def create_vif(cls, session, vm_ref, network_ref, mac_address):
def create_vif(cls, session, vm_ref, network_ref, mac_address, dev="0"):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
vif_rec['device'] = '0'
vif_rec['device'] = dev
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
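create_vif() now accepts a dev argument so callers can choose the VIF's device slot (the rescue path later in this commit attaches its VIF as device "1"). A small stand-alone sketch of the record being built; the XenAPI session and the actual VIF.create call are deliberately left out:

def build_vif_rec(vm_ref, network_ref, mac_address, dev="0"):
    # Only the fields visible in the hunk above; the real record is handed to
    # VIF.create through the XenAPI session, which is not reproduced here.
    return {'device': dev,          # "0" for the primary VIF, "1" in rescue mode
            'network': network_ref,
            'VM': vm_ref,
            'MAC': mac_address}


rec = build_vif_rec('OpaqueRef:vm', 'OpaqueRef:net', '02:16:3e:00:00:01', dev="1")
assert rec['device'] == '1'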
@ -269,7 +269,7 @@ class VMHelper(HelperBase):
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)

task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
template_vm_ref = session.wait_for_task(instance_id, task)
template_vm_ref = session.wait_for_task(task, instance_id)
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]

@ -302,7 +302,7 @@ class VMHelper(HelperBase):

kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
session.wait_for_task(instance_id, task)
session.wait_for_task(task, instance_id)

@classmethod
def fetch_image(cls, session, instance_id, image, user, project,
@ -345,7 +345,7 @@ class VMHelper(HelperBase):

kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'download_vhd', kwargs)
vdi_uuid = session.wait_for_task(instance_id, task)
vdi_uuid = session.wait_for_task(task, instance_id)

scan_sr(session, instance_id, sr_ref)

@ -401,7 +401,7 @@ class VMHelper(HelperBase):
#let the plugin copy the correct number of bytes
args['image-size'] = str(vdi_size)
task = session.async_call_plugin('glance', fn, args)
filename = session.wait_for_task(instance_id, task)
filename = session.wait_for_task(task, instance_id)
#remove the VDI as it is not needed anymore
session.get_xenapi().VDI.destroy(vdi)
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
@ -493,7 +493,7 @@ class VMHelper(HelperBase):
if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
uuid = session.wait_for_task(instance_id, task)
uuid = session.wait_for_task(task, instance_id)
return uuid

@classmethod
@ -513,7 +513,7 @@ class VMHelper(HelperBase):
args = {}
args['vdi-ref'] = vdi_ref
task = session.async_call_plugin('objectstore', fn, args)
pv_str = session.wait_for_task(instance_id, task)
pv_str = session.wait_for_task(task, instance_id)
pv = None
if pv_str.lower() == 'true':
pv = True
@ -654,7 +654,7 @@ def get_vhd_parent_uuid(session, vdi_ref):
def scan_sr(session, instance_id, sr_ref):
LOG.debug(_("Re-scanning SR %s"), sr_ref)
task = session.call_xenapi('Async.SR.scan', sr_ref)
session.wait_for_task(instance_id, task)
session.wait_for_task(task, instance_id)


def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,

@ -49,6 +49,7 @@ class VMOps(object):
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session

VMHelper.XenAPI = self.XenAPI

def list_instances(self):
@ -62,20 +63,20 @@ class VMOps(object):

def spawn(self, instance):
"""Create VM instance"""
vm = VMHelper.lookup(self._session, instance.name)
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is not None:
raise exception.Duplicate(_('Attempted to create'
' non-unique name %s') % instance.name)
' non-unique name %s') % instance_name)

#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
name = instance['name']
LOG.exception(_('instance %(name)s: not enough free memory')
% locals())
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
return
LOG.exception(_('instance %(instance_name)s: not enough free '
'memory') % locals())
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
return

user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
@ -116,10 +117,9 @@ class VMOps(object):
self.create_vifs(instance, networks)

LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
instance_name = instance.name
self._start(instance, vm_ref)
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals())
% locals())

def _inject_onset_files():
onset_files = instance.onset_files
@ -143,18 +143,18 @@ class VMOps(object):

def _wait_for_boot():
try:
state = self.get_info(instance['name'])['state']
state = self.get_info(instance_name)['state']
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance['name'])
LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
_inject_onset_files()
return True
except Exception, exc:
LOG.warn(exc)
LOG.exception(_('instance %s: failed to boot'),
instance['name'])
instance_name)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
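_wait_for_boot() above polls the instance state until it reaches RUNNING and only then injects the onset files. A minimal sketch of that poll-until-running loop, using plain time.sleep in place of the driver's timer and hypothetical power-state constants:

import time

RUNNING, SHUTDOWN = 1, 4  # hypothetical power-state constants


def wait_for_boot(get_state, inject_files, interval=0.1, max_polls=50):
    """Poll get_state() until it reports RUNNING, then run the post-boot step."""
    for _ in range(max_polls):
        if get_state() == RUNNING:
            inject_files()
            return True
        time.sleep(interval)
    return False  # a real caller would mark the instance as failed/SHUTDOWN


# Toy usage: the "VM" becomes RUNNING on the third poll.
states = iter([SHUTDOWN, SHUTDOWN, RUNNING])
print(wait_for_boot(lambda: next(states), lambda: None))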
@ -202,6 +202,20 @@ class VMOps(object):
_('Instance not present %s') % instance_name)
return vm

def _acquire_bootlock(self, vm):
"""Prevent an instance from booting"""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})

def _release_bootlock(self, vm):
"""Allow an instance to boot"""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")

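The new bootlock helpers lean on the XenAPI blocked_operations map: adding a "start" key prevents boot, removing it allows boot again. A toy sketch with an in-memory dict standing in for the VM record:

vm_record = {'blocked_operations': {}}  # stand-in for the real VM record


def acquire_bootlock(vm):
    vm['blocked_operations']['start'] = ''   # any value blocks the "start" operation


def release_bootlock(vm):
    vm['blocked_operations'].pop('start', None)


acquire_bootlock(vm_record)
assert 'start' in vm_record['blocked_operations']
release_bootlock(vm_record)
assert 'start' not in vm_record['blocked_operations']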
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance

@ -254,7 +268,7 @@ class VMOps(object):
"""Reboot VM instance"""
vm = self._get_vm_opaque_ref(instance)
task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
self._session.wait_for_task(instance.id, task)
self._session.wait_for_task(task, instance.id)

def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance. This is done via
@ -294,6 +308,11 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']

def _start(self, instance, vm):
"""Start an instance"""
task = self._session.call_xenapi("Async.VM.start", vm, False, False)
self._session.wait_for_task(task, instance.id)

def inject_file(self, instance, b64_path, b64_contents):
"""Write a file to the VM instance. The path to which it is to be
written and the contents of the file need to be supplied; both should
@ -320,8 +339,8 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']

def _shutdown(self, instance, vm):
"""Shutdown an instance """
def _shutdown(self, instance, vm, hard=True):
"""Shutdown an instance"""
state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN:
LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
@ -332,8 +351,13 @@ class VMOps(object):
LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
% locals())
try:
task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
self._session.wait_for_task(instance.id, task)
task = None
if hard:
task = self._session.call_xenapi("Async.VM.hard_shutdown", vm)
else:
task = self._session.call_xenapi("Async.VM.clean_shutdown", vm)

self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)

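The hard flag added to _shutdown() only selects which asynchronous XenAPI call is made. A reduced sketch of that branch, with the session replaced by a recording stub:

class FakeSession(object):
    """Recording stub for the two XenAPI calls used by _shutdown()."""
    def __init__(self):
        self.calls = []

    def call_xenapi(self, method, ref):
        self.calls.append(method)
        return 'OpaqueRef:task'

    def wait_for_task(self, task, instance_id=None):
        return task


def shutdown(session, vm_ref, instance_id, hard=True):
    # hard=True keeps the old behaviour; hard=False asks the guest to shut down.
    if hard:
        task = session.call_xenapi("Async.VM.hard_shutdown", vm_ref)
    else:
        task = session.call_xenapi("Async.VM.clean_shutdown", vm_ref)
    return session.wait_for_task(task, instance_id)


session = FakeSession()
shutdown(session, 'OpaqueRef:vm', 42, hard=False)
assert session.calls == ["Async.VM.clean_shutdown"]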
@ -350,7 +374,7 @@ class VMOps(object):
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)

@ -389,7 +413,7 @@ class VMOps(object):
args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
task = self._session.async_call_plugin(
'glance', 'remove_kernel_ramdisk', args)
self._session.wait_for_task(instance.id, task)
self._session.wait_for_task(task, instance.id)

LOG.debug(_("kernel/ramdisk files removed"))

@ -398,7 +422,7 @@ class VMOps(object):
instance_id = instance.id
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance_id, task)
self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)

@ -441,7 +465,7 @@ class VMOps(object):
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
ret = self._session.wait_for_task(instance_id, task)
ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
callback(ret)
@ -470,6 +494,78 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
self._wait_with_callback(instance.id, task, callback)

def rescue(self, instance, callback):
"""Rescue the specified instance
- shutdown the instance VM
- set 'bootlock' to prevent the instance from starting in rescue
- spawn a rescue VM (the vm name-label will be instance-N-rescue)

"""
rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")
if rescue_vm:
raise RuntimeError(_(
"Instance is already in Rescue Mode: %s" % instance.name))

vm = self._get_vm_opaque_ref(instance)
self._shutdown(instance, vm)
self._acquire_bootlock(vm)

instance._rescue = True
self.spawn(instance)
rescue_vm = self._get_vm_opaque_ref(instance)

vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0]
vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"]
vbd_ref = VMHelper.create_vbd(
self._session,
rescue_vm,
vdi_ref,
1,
False)

self._session.call_xenapi("Async.VBD.plug", vbd_ref)

def unrescue(self, instance, callback):
"""Unrescue the specified instance
- unplug the instance VM's disk from the rescue VM
- teardown the rescue VM
- release the bootlock to allow the instance VM to start

"""
rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue")

if not rescue_vm:
raise exception.NotFound(_(
"Instance is not in Rescue Mode: %s" % instance.name))

original_vm = self._get_vm_opaque_ref(instance)
vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm)

instance._rescue = False

for vbd_ref in vbds:
vbd = self._session.get_xenapi().VBD.get_record(vbd_ref)
if vbd["userdevice"] == "1":
VMHelper.unplug_vbd(self._session, vbd_ref)
VMHelper.destroy_vbd(self._session, vbd_ref)

task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm)
self._session.wait_for_task(task1, instance.id)

vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm)
for vdi in vdis:
try:
task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure:
continue

task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm)
self._session.wait_for_task(task2, instance.id)

self._release_bootlock(original_vm)
self._start(instance, original_vm)

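Taken together, rescue() and unrescue() pivot on the "<name>-rescue" name-label: shut down and bootlock the original VM, spawn a rescue copy, attach the original disk as device 1, then reverse all of it on unrescue. A condensed sketch of that ordering, with every XenAPI call reduced to a step description:

def rescue_steps(instance_name, existing_vms):
    # Mirrors the guard in rescue(): a second rescue of the same instance fails.
    if instance_name + "-rescue" in existing_vms:
        raise RuntimeError("Instance is already in Rescue Mode: %s"
                           % instance_name)
    return ["shutdown " + instance_name,
            "acquire bootlock on " + instance_name,
            "spawn " + instance_name + "-rescue",
            "attach original disk to the rescue VM as device 1"]


def unrescue_steps(instance_name, existing_vms):
    if instance_name + "-rescue" not in existing_vms:
        raise LookupError("Instance is not in Rescue Mode: %s" % instance_name)
    return ["unplug and destroy the device-1 VBD on the rescue VM",
            "hard shutdown and destroy the rescue VM and its VDIs",
            "release bootlock on " + instance_name,
            "start " + instance_name]


print(rescue_steps("instance-1", set(["instance-1"])))
print(unrescue_steps("instance-1", set(["instance-1", "instance-1-rescue"])))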
def get_info(self, instance):
"""Return data about VM instance"""
vm = self._get_vm_opaque_ref(instance)
@ -514,18 +610,30 @@ class VMOps(object):
network_IPs = [ip for ip in IPs if ip.network_id == network.id]

def ip_dict(ip):
return {'netmask': network['netmask'],
'enabled': '1',
'ip': ip.address}
return {
"ip": ip.address,
"netmask": network["netmask"],
"enabled": "1"}

def ip6_dict(ip6):
return {
"ip": ip6.addressV6,
"netmask": ip6.netmaskV6,
"gateway": ip6.gatewayV6,
"enabled": "1"}

mac_id = instance.mac_address.replace(':', '')
location = 'vm-data/networking/%s' % mac_id
mapping = {'label': network['label'],
'gateway': network['gateway'],
'mac': instance.mac_address,
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_IPs]}
mapping = {
'label': network['label'],
'gateway': network['gateway'],
'mac': instance.mac_address,
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_IPs],
'ip6s': [ip6_dict(ip) for ip in network_IPs]}

self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})

try:
self.write_to_xenstore(vm_opaque_ref, location,
mapping['location'])
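The injected networking record now carries an ip6s list alongside ips. A small sketch of what ends up under vm-data/networking/<mac> in the xenstore, using made-up addresses:

# Illustrative values only; the real dict is built from the instance's network
# and fixed-ip records.
mac_address = '02:16:3e:00:00:01'
location = 'vm-data/networking/%s' % mac_address.replace(':', '')

mapping = {
    'label': 'public',
    'gateway': '10.0.0.1',
    'mac': mac_address,
    'dns': ['8.8.8.8'],
    'ips': [{'ip': '10.0.0.3', 'netmask': '255.255.255.0', 'enabled': '1'}],
    'ip6s': [{'ip': 'fe80::216:3eff:fe00:1', 'netmask': '64',
              'gateway': 'fe80::1', 'enabled': '1'}],
}

print("%s -> %s" % (location, mapping['ip6s'][0]['ip']))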
@ -556,8 +664,17 @@ class VMOps(object):
NetworkHelper.find_network_with_bridge(self._session, bridge)

if network_ref:
VMHelper.create_vif(self._session, vm_opaque_ref,
network_ref, instance.mac_address)
try:
device = "1" if instance._rescue else "0"
except AttributeError:
device = "0"

VMHelper.create_vif(
self._session,
vm_opaque_ref,
network_ref,
instance.mac_address,
device)

def reset_network(self, instance):
"""
@ -627,7 +744,7 @@ class VMOps(object):
args.update(addl_args)
try:
task = self._session.async_call_plugin(plugin, method, args)
ret = self._session.wait_for_task(instance_id, task)
ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, e:
ret = None
err_trace = e.details[-1]

@ -83,7 +83,7 @@ class VolumeOps(object):
try:
task = self._session.call_xenapi('Async.VBD.plug',
vbd_ref)
self._session.wait_for_task(vol_rec['deviceNumber'], task)
self._session.wait_for_task(task, vol_rec['deviceNumber'])
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,

@ -196,6 +196,14 @@ class XenAPIConnection(object):
"""resume the specified instance"""
self._vmops.resume(instance, callback)

def rescue(self, instance, callback):
"""Rescue the specified instance"""
self._vmops.rescue(instance, callback)

def unrescue(self, instance, callback):
"""Unrescue the specified instance"""
self._vmops.unrescue(instance, callback)

def reset_network(self, instance):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
@ -279,7 +287,7 @@ class XenAPISession(object):
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)

def wait_for_task(self, id, task):
def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
until it completes. Not re-entrant."""
done = event.Event()
@ -306,10 +314,11 @@ class XenAPISession(object):
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
action = dict(
instance_id=int(id),
action=name[0:255], # Ensure action is never > 255
error=None)
if id:
action = dict(
instance_id=int(id),
action=name[0:255], # Ensure action is never > 255
error=None)
if status == "pending":
return
elif status == "success":
@ -323,7 +332,9 @@ class XenAPISession(object):
LOG.warn(_("Task [%(name)s] %(task)s status:"
" %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)

if id:
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
LOG.warn(exc)
done.send_exception(*sys.exc_info())

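wait_for_task() now takes (task, id=None), and the instance-action bookkeeping only happens when an id is supplied, which is what lets callers such as destroy_vbd() above drop the placeholder id. A reduced sketch of that conditional, with the task polling and the database call stubbed out:

actions_logged = []  # stand-in for db.instance_action_create()


def wait_for_task(task, id=None):
    """Sketch: return a canned task result; only record an action when id is given."""
    name, status = 'VBD.destroy', 'success'
    if id:
        actions_logged.append({'instance_id': int(id),
                               'action': name[0:255],
                               'error': None})
    return status


wait_for_task('OpaqueRef:task')       # e.g. destroy_vbd(): no instance to record
wait_for_task('OpaqueRef:task', 42)   # ordinary callers still record an action
assert len(actions_logged) == 1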
@ -207,8 +207,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port):
'transfer-encoding': 'chunked',
'x-image-meta-is_public': 'True',
'x-image-meta-status': 'queued',
'x-image-meta-type': 'vhd'
}
'x-image-meta-type': 'vhd'}
for header, value in headers.iteritems():
conn.putheader(header, value)
conn.endheaders()

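For context on the header block reshaped above, _upload_tarball() opens an HTTP connection to Glance, emits these x-image-meta-* headers, then streams the tarball in chunks. The sketch below only assembles and prints the header dict; the host, port and image id are invented and no connection is opened:

# Header names copied from the hunk above; all values below are examples only.
glance_host, glance_port, image_id = 'glance.example.com', 9292, 42

headers = {
    'transfer-encoding': 'chunked',
    'x-image-meta-is_public': 'True',
    'x-image-meta-status': 'queued',
    'x-image-meta-type': 'vhd'}

print("uploading image %s to %s:%s" % (image_id, glance_host, glance_port))
for header, value in sorted(headers.items()):
    print("%s: %s" % (header, value))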
@ -29,3 +29,4 @@ sqlalchemy-migrate
netaddr
sphinx
glance
suds==0.4