KVM and XEN Disk Management Parity
Implements blueprint disk-configuration-parity. This change splits local_gb into root_gb and ephemeral_gb. libvirt interpreted local_gb as what ephemeral_gb is now, whereas XenAPI interpreted local_gb as what root_gb is now. Change-Id: I496600991bac1e990326d4ded1607fee08209d68
This commit is contained in:
parent
91bc67d81a
commit
a4223f1d89
|
@ -89,7 +89,7 @@ def instance_dict(inst):
|
|||
return {'name': inst['name'],
|
||||
'memory_mb': inst['memory_mb'],
|
||||
'vcpus': inst['vcpus'],
|
||||
'disk_gb': inst['local_gb'],
|
||||
'disk_gb': inst['root_gb'],
|
||||
'flavor_id': inst['flavorid']}
|
||||
|
||||
|
||||
|
|
|
@ -100,9 +100,10 @@ class MetadataRequestHandler(wsgi.Application):
|
|||
mappings = {}
|
||||
mappings['ami'] = block_device.strip_dev(root_device_name)
|
||||
mappings['root'] = root_device_name
|
||||
default_local_device = instance_ref.get('default_local_device')
|
||||
if default_local_device:
|
||||
mappings['ephemeral0'] = default_local_device
|
||||
default_ephemeral_device = \
|
||||
instance_ref.get('default_ephemeral_device')
|
||||
if default_ephemeral_device:
|
||||
mappings['ephemeral0'] = default_ephemeral_device
|
||||
default_swap_device = instance_ref.get('default_swap_device')
|
||||
if default_swap_device:
|
||||
mappings['swap'] = default_swap_device
|
||||
|
|
|
@ -135,7 +135,7 @@ class SimpleTenantUsageController(object):
|
|||
info['name'] = instance['display_name']
|
||||
|
||||
info['memory_mb'] = flavor['memory_mb']
|
||||
info['local_gb'] = flavor['local_gb']
|
||||
info['local_gb'] = flavor['root_gb'] + flavor['ephemeral_gb']
|
||||
info['vcpus'] = flavor['vcpus']
|
||||
|
||||
info['tenant_id'] = instance['project_id']
|
||||
|
|
|
@ -104,7 +104,7 @@ class Controller(wsgi.Controller):
|
|||
|
||||
if 'minDisk' in req.params:
|
||||
try:
|
||||
filters['min_local_gb'] = int(req.params['minDisk'])
|
||||
filters['min_root_gb'] = int(req.params['minDisk'])
|
||||
except ValueError:
|
||||
pass # ignore bogus values per spec
|
||||
|
||||
|
|
|
@ -37,7 +37,7 @@ class ViewBuilder(common.ViewBuilder):
|
|||
"id": flavor["flavorid"],
|
||||
"name": flavor["name"],
|
||||
"ram": flavor["memory_mb"],
|
||||
"disk": flavor["local_gb"],
|
||||
"disk": flavor["root_gb"],
|
||||
"vcpus": flavor.get("vcpus") or "",
|
||||
"swap": flavor.get("swap") or "",
|
||||
"rxtx_factor": flavor.get("rxtx_factor") or "",
|
||||
|
|
|
@ -226,7 +226,7 @@ class API(base.Base):
|
|||
|
||||
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
|
||||
raise exception.InstanceTypeMemoryTooSmall()
|
||||
if instance_type['local_gb'] < int(image.get('min_disk') or 0):
|
||||
if instance_type['root_gb'] < int(image.get('min_disk') or 0):
|
||||
raise exception.InstanceTypeDiskTooSmall()
|
||||
|
||||
config_drive_id = None
|
||||
|
@ -315,7 +315,8 @@ class API(base.Base):
|
|||
'instance_type_id': instance_type['id'],
|
||||
'memory_mb': instance_type['memory_mb'],
|
||||
'vcpus': instance_type['vcpus'],
|
||||
'local_gb': instance_type['local_gb'],
|
||||
'root_gb': instance_type['root_gb'],
|
||||
'ephemeral_gb': instance_type['ephemeral_gb'],
|
||||
'display_name': display_name,
|
||||
'display_description': display_description,
|
||||
'user_data': user_data or '',
|
||||
|
@ -376,13 +377,13 @@ class API(base.Base):
|
|||
|
||||
# TODO(yamahata): ephemeralN where N > 0
|
||||
# Only ephemeral0 is allowed for now because InstanceTypes
|
||||
# table only allows single local disk, local_gb.
|
||||
# table only allows single local disk, ephemeral_gb.
|
||||
# In order to enhance it, we need to add a new columns to
|
||||
# instance_types table.
|
||||
if num > 0:
|
||||
return 0
|
||||
|
||||
size = instance_type.get('local_gb')
|
||||
size = instance_type.get('ephemeral_gb')
|
||||
|
||||
return size
|
||||
|
||||
|
@ -1240,7 +1241,7 @@ class API(base.Base):
|
|||
#disk format of vhd is non-shrinkable
|
||||
if orig_image.get('disk_format') == 'vhd':
|
||||
min_ram = instance['instance_type']['memory_mb']
|
||||
min_disk = instance['instance_type']['local_gb']
|
||||
min_disk = instance['instance_type']['root_gb']
|
||||
else:
|
||||
#set new image values to the original image values
|
||||
min_ram = orig_image.get('min_ram')
|
||||
|
|
|
@ -30,13 +30,14 @@ FLAGS = flags.FLAGS
|
|||
LOG = logging.getLogger('nova.instance_types')
|
||||
|
||||
|
||||
def create(name, memory, vcpus, local_gb, flavorid, swap=0,
|
||||
def create(name, memory, vcpus, root_gb, ephemeral_gb, flavorid, swap=0,
|
||||
rxtx_factor=1):
|
||||
"""Creates instance types."""
|
||||
kwargs = {
|
||||
'memory_mb': memory,
|
||||
'vcpus': vcpus,
|
||||
'local_gb': local_gb,
|
||||
'root_gb': root_gb,
|
||||
'ephemeral_gb': ephemeral_gb,
|
||||
'swap': swap,
|
||||
'rxtx_factor': rxtx_factor,
|
||||
}
|
||||
|
|
|
@ -432,12 +432,12 @@ class ComputeManager(manager.SchedulerDependentManager):
|
|||
|
||||
instance_type_id = instance['instance_type_id']
|
||||
instance_type = instance_types.get_instance_type(instance_type_id)
|
||||
allowed_size_gb = instance_type['local_gb']
|
||||
allowed_size_gb = instance_type['root_gb']
|
||||
|
||||
# NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we
|
||||
# need to handle potential situations where local_gb is 0. This is
|
||||
# the default for m1.tiny.
|
||||
if allowed_size_gb == 0:
|
||||
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
|
||||
# since libvirt interpreted the value differently than other
|
||||
# drivers. A value of 0 means don't check size.
|
||||
if not allowed_size_gb:
|
||||
return image_meta
|
||||
|
||||
allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
|
||||
|
@ -1111,7 +1111,8 @@ class ComputeManager(manager.SchedulerDependentManager):
|
|||
memory_mb=instance_type['memory_mb'],
|
||||
host=migration_ref['source_compute'],
|
||||
vcpus=instance_type['vcpus'],
|
||||
local_gb=instance_type['local_gb'],
|
||||
root_gb=instance_type['root_gb'],
|
||||
ephemeral_gb=instance_type['ephemeral_gb'],
|
||||
instance_type_id=instance_type['id'])
|
||||
|
||||
self.driver.finish_revert_migration(instance_ref)
|
||||
|
@ -1238,7 +1239,8 @@ class ComputeManager(manager.SchedulerDependentManager):
|
|||
dict(instance_type_id=instance_type['id'],
|
||||
memory_mb=instance_type['memory_mb'],
|
||||
vcpus=instance_type['vcpus'],
|
||||
local_gb=instance_type['local_gb']))
|
||||
root_gb=instance_type['root_gb'],
|
||||
ephemeral_gb=instance_type['ephemeral_gb']))
|
||||
resize_instance = True
|
||||
|
||||
instance_ref = self.db.instance_get_by_uuid(context,
|
||||
|
|
|
@ -3380,9 +3380,9 @@ def instance_type_get_all(context, inactive=False, filters=None):
|
|||
if 'min_memory_mb' in filters:
|
||||
query = query.filter(
|
||||
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
|
||||
if 'min_local_gb' in filters:
|
||||
if 'min_root_gb' in filters:
|
||||
query = query.filter(
|
||||
models.InstanceTypes.local_gb >= filters['min_local_gb'])
|
||||
models.InstanceTypes.root_gb >= filters['min_root_gb'])
|
||||
|
||||
inst_types = query.order_by("name").all()
|
||||
|
||||
|
|
|
@ -0,0 +1,130 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack LLC.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sqlalchemy
|
||||
from sqlalchemy import select, Column, Integer
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger('nova.db.sqlalchemy.migrate_repo.versions')
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
meta = sqlalchemy.MetaData()
|
||||
|
||||
|
||||
def _get_table(name):
    """Return a reflected Table for *name* bound to the shared metadata."""
    table = sqlalchemy.Table(name, meta, autoload=True)
    return table
|
||||
|
||||
|
||||
def upgrade_libvirt(instances, instance_types):
    """Populate root_gb/ephemeral_gb for deployments using the libvirt driver.

    libvirt historically treated local_gb as the *secondary* (ephemeral)
    disk, so here local_gb becomes ephemeral_gb and root_gb gets a fixed
    default (10 GB, or 0 for m1.tiny which legacy libvirt special-cased).

    :param instances: reflected ``instances`` table
    :param instance_types: reflected ``instance_types`` table
    """
    # Update instance_types first
    tiny = None
    for inst_type in instance_types.select().execute():
        if inst_type['name'] == 'm1.tiny':
            # Remember the m1.tiny id so matching instances can be
            # patched up after the blanket update below.
            tiny = inst_type['id']
            root_gb = 0
        else:
            root_gb = 10

        instance_types.update()\
                      .values(root_gb=root_gb,
                              ephemeral_gb=inst_type['local_gb'])\
                      .where(instance_types.c.id == inst_type['id'])\
                      .execute()

    # then update instances following same pattern: give every instance
    # the 10 GB root default first...
    instances.update()\
             .values(root_gb=10,
                     ephemeral_gb=instances.c.local_gb)\
             .execute()

    # ...then override root_gb back to 0 for instances of the m1.tiny
    # flavor, mirroring the instance_types treatment above.
    if tiny is not None:
        instances.update()\
                 .values(root_gb=0,
                         ephemeral_gb=instances.c.local_gb)\
                 .where(instances.c.instance_type_id == tiny)\
                 .execute()
|
||||
|
||||
|
||||
def upgrade_other(instances, instance_types):
    """Populate root_gb/ephemeral_gb for non-libvirt drivers.

    Drivers other than libvirt (e.g. XenAPI) treated local_gb as the
    root disk, so local_gb is copied into root_gb and ephemeral_gb is
    zeroed for both tables.
    """
    for tbl in (instances, instance_types):
        stmt = tbl.update().values(root_gb=tbl.c.local_gb,
                                   ephemeral_gb=0)
        stmt.execute()
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
    """Split local_gb into root_gb and ephemeral_gb on both tables.

    Adds root_gb/ephemeral_gb columns to ``instances`` and
    ``instance_types``, migrates the data according to the active
    hypervisor driver, renames default_local_device to
    default_ephemeral_device, and finally drops local_gb.

    :param migrate_engine: SQLAlchemy engine to run the migration on
    :raises exception.Error: if connection_type is not configured, since
        the data split depends on which driver populated local_gb
    """
    if not FLAGS.connection_type:
        raise exception.Error("Need connection_type specified to run "
                              "migration")

    meta.bind = migrate_engine
    instances = _get_table('instances')
    instance_types = _get_table('instance_types')

    # Add the two replacement columns to both tables before any data
    # is moved.
    for table in (instances, instance_types):
        root_gb = Column('root_gb', Integer)
        root_gb.create(table)
        ephemeral_gb = Column('ephemeral_gb', Integer)
        ephemeral_gb.create(table)

    # Since this migration is part of the work to get all drivers
    # working the same way, we need to treat the new root_gb and
    # ephemeral_gb columns differently depending on what the
    # driver implementation used to behave like.
    if FLAGS.connection_type == 'libvirt':
        upgrade_libvirt(instances, instance_types)
    else:
        upgrade_other(instances, instance_types)

    # libvirt's "local" device is what other drivers call ephemeral,
    # so the per-instance default device column is renamed to match.
    default_local_device = instances.c.default_local_device
    default_local_device.alter(name='default_ephemeral_device')

    # Data has been copied out; local_gb is no longer needed.
    for table in (instances, instance_types):
        table.drop_column('local_gb')
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
    """Recombine root_gb/ephemeral_gb back into a single local_gb column.

    Re-creates local_gb on ``instances`` and ``instance_types``, fills
    it from ephemeral_gb (libvirt) or root_gb (all other drivers),
    renames default_ephemeral_device back to default_local_device, and
    drops the split columns.

    :param migrate_engine: SQLAlchemy engine to run the downgrade on
    """
    meta.bind = migrate_engine
    instances = _get_table('instances')
    instance_types = _get_table('instance_types')

    for table in (instances, instance_types):
        # BUG FIX: the restored column must be named 'local_gb', not
        # 'root_gb' (root_gb already exists at this point). With the old
        # name, the values(local_gb=...) update below and the error-path
        # drop_column('local_gb') both referenced a nonexistent column.
        local_gb = Column('local_gb', Integer)
        local_gb.create(table)

    try:
        for table in (instances, instance_types):
            # Mirror the upgrade: libvirt stored its disk size in
            # ephemeral_gb, every other driver in root_gb.
            if FLAGS.connection_type == 'libvirt':
                column = table.c.ephemeral_gb
            else:
                column = table.c.root_gb
            table.update().values(local_gb=column).execute()
    except Exception:
        # Roll back the columns we added so the downgrade can be retried.
        for table in (instances, instance_types):
            table.drop_column('local_gb')
        raise

    default_ephemeral_device = instances.c.default_ephemeral_device
    default_ephemeral_device.alter(name='default_local_device')

    for table in (instances, instance_types):
        table.drop_column('root_gb')
        table.drop_column('ephemeral_gb')
|
|
@ -0,0 +1,313 @@
|
|||
-- sqlalchemy-migrate is surprisingly broken when it comes to migrations
|
||||
-- for sqlite. As a result, we have to do much of the work manually here
|
||||
|
||||
BEGIN TRANSACTION;
|
||||
-- make backup of instance_types
|
||||
CREATE TEMPORARY TABLE instance_types_backup (
|
||||
created_at DATETIME,
|
||||
updated_at DATETIME,
|
||||
deleted_at DATETIME,
|
||||
deleted BOOLEAN,
|
||||
name VARCHAR(255),
|
||||
id INTEGER NOT NULL,
|
||||
memory_mb INTEGER NOT NULL,
|
||||
vcpus INTEGER NOT NULL,
|
||||
local_gb INTEGER NOT NULL,
|
||||
swap INTEGER NOT NULL,
|
||||
rxtx_factor FLOAT,
|
||||
vcpu_weight INTEGER,
|
||||
flavorid VARCHAR(255),
|
||||
PRIMARY KEY (id),
|
||||
UNIQUE (flavorid),
|
||||
CHECK (deleted IN (0, 1)),
|
||||
UNIQUE (name)
|
||||
);
|
||||
|
||||
INSERT INTO instance_types_backup
|
||||
SELECT created_at,
|
||||
updated_at,
|
||||
deleted_at,
|
||||
deleted,
|
||||
name,
|
||||
id,
|
||||
memory_mb,
|
||||
vcpus,
|
||||
local_gb,
|
||||
swap,
|
||||
rxtx_factor,
|
||||
vcpu_weight,
|
||||
flavorid
|
||||
FROM instance_types;
|
||||
|
||||
DROP TABLE instance_types;
|
||||
|
||||
CREATE TABLE instance_types (
|
||||
created_at DATETIME,
|
||||
updated_at DATETIME,
|
||||
deleted_at DATETIME,
|
||||
deleted BOOLEAN,
|
||||
name VARCHAR(255),
|
||||
id INTEGER NOT NULL,
|
||||
memory_mb INTEGER NOT NULL,
|
||||
vcpus INTEGER NOT NULL,
|
||||
root_gb INTEGER NOT NULL,
|
||||
ephemeral_gb INTEGER NOT NULL,
|
||||
swap INTEGER NOT NULL,
|
||||
rxtx_factor FLOAT,
|
||||
vcpu_weight INTEGER,
|
||||
flavorid VARCHAR(255),
|
||||
PRIMARY KEY (id),
|
||||
UNIQUE (flavorid),
|
||||
CHECK (deleted IN (0, 1)),
|
||||
UNIQUE (name)
|
||||
);
|
||||
|
||||
-- copy from backup to new table with root_gb set to local_gb and
|
||||
-- ephemeral_gb set to 0
|
||||
INSERT INTO instance_types
|
||||
SELECT created_at,
|
||||
updated_at,
|
||||
deleted_at,
|
||||
deleted,
|
||||
name,
|
||||
id,
|
||||
memory_mb,
|
||||
vcpus,
|
||||
local_gb,
|
||||
0,
|
||||
swap,
|
||||
rxtx_factor,
|
||||
vcpu_weight,
|
||||
flavorid
|
||||
FROM instance_types_backup;
|
||||
|
||||
DROP TABLE instance_types_backup;
|
||||
|
||||
-- make backup of instances
|
||||
CREATE TEMPORARY TABLE instances_backup (
|
||||
created_at DATETIME,
|
||||
updated_at DATETIME,
|
||||
deleted_at DATETIME,
|
||||
deleted BOOLEAN,
|
||||
id INTEGER NOT NULL,
|
||||
internal_id INTEGER,
|
||||
user_id VARCHAR(255),
|
||||
project_id VARCHAR(255),
|
||||
image_ref VARCHAR(255),
|
||||
kernel_id VARCHAR(255),
|
||||
ramdisk_id VARCHAR(255),
|
||||
server_name VARCHAR(255),
|
||||
launch_index INTEGER,
|
||||
key_name VARCHAR(255),
|
||||
key_data TEXT,
|
||||
power_state INTEGER,
|
||||
vm_state VARCHAR(255),
|
||||
memory_mb INTEGER,
|
||||
vcpus INTEGER,
|
||||
local_gb INTEGER,
|
||||
hostname VARCHAR(255),
|
||||
host VARCHAR(255),
|
||||
user_data TEXT,
|
||||
reservation_id VARCHAR(255),
|
||||
scheduled_at DATETIME,
|
||||
launched_at DATETIME,
|
||||
terminated_at DATETIME,
|
||||
display_name VARCHAR(255),
|
||||
display_description VARCHAR(255),
|
||||
availability_zone VARCHAR(255),
|
||||
locked BOOLEAN,
|
||||
os_type VARCHAR(255),
|
||||
launched_on TEXT,
|
||||
instance_type_id INTEGER,
|
||||
vm_mode VARCHAR(255),
|
||||
uuid VARCHAR(36),
|
||||
architecture VARCHAR(255),
|
||||
root_device_name VARCHAR(255),
|
||||
access_ip_v4 VARCHAR(255),
|
||||
access_ip_v6 VARCHAR(255),
|
||||
config_drive VARCHAR(255),
|
||||
task_state VARCHAR(255),
|
||||
default_local_device VARCHAR(255),
|
||||
default_swap_device VARCHAR(255),
|
||||
progress INTEGER,
|
||||
auto_disk_config BOOLEAN,
|
||||
shutdown_terminate BOOLEAN,
|
||||
disable_terminate BOOLEAN,
|
||||
PRIMARY KEY (id),
|
||||
CHECK (deleted IN (0, 1)),
|
||||
CHECK (locked IN (0, 1)),
|
||||
CHECK (auto_disk_config IN (0, 1)),
|
||||
CHECK (shutdown_terminate IN (0, 1)),
|
||||
CHECK (disable_terminate IN (0, 1))
|
||||
);
|
||||
|
||||
INSERT INTO instances_backup
|
||||
SELECT created_at,
|
||||
updated_at,
|
||||
deleted_at,
|
||||
deleted,
|
||||
id,
|
||||
internal_id,
|
||||
user_id,
|
||||
project_id,
|
||||
image_ref,
|
||||
kernel_id,
|
||||
ramdisk_id,
|
||||
server_name,
|
||||
launch_index,
|
||||
key_name,
|
||||
key_data,
|
||||
power_state,
|
||||
vm_state,
|
||||
memory_mb,
|
||||
vcpus,
|
||||
local_gb,
|
||||
hostname,
|
||||
host,
|
||||
user_data,
|
||||
reservation_id,
|
||||
scheduled_at,
|
||||
launched_at,
|
||||
terminated_at,
|
||||
display_name,
|
||||
display_description,
|
||||
availability_zone,
|
||||
locked,
|
||||
os_type,
|
||||
launched_on,
|
||||
instance_type_id,
|
||||
vm_mode,
|
||||
uuid,
|
||||
architecture,
|
||||
root_device_name,
|
||||
access_ip_v4,
|
||||
access_ip_v6,
|
||||
config_drive,
|
||||
task_state,
|
||||
default_local_device,
|
||||
default_swap_device,
|
||||
progress,
|
||||
auto_disk_config,
|
||||
shutdown_terminate,
|
||||
disable_terminate
|
||||
FROM instances;
|
||||
|
||||
DROP TABLE instances;
|
||||
|
||||
CREATE TABLE instances (
|
||||
created_at DATETIME,
|
||||
updated_at DATETIME,
|
||||
deleted_at DATETIME,
|
||||
deleted BOOLEAN,
|
||||
id INTEGER NOT NULL,
|
||||
internal_id INTEGER,
|
||||
user_id VARCHAR(255),
|
||||
project_id VARCHAR(255),
|
||||
image_ref VARCHAR(255),
|
||||
kernel_id VARCHAR(255),
|
||||
ramdisk_id VARCHAR(255),
|
||||
server_name VARCHAR(255),
|
||||
launch_index INTEGER,
|
||||
key_name VARCHAR(255),
|
||||
key_data TEXT,
|
||||
power_state INTEGER,
|
||||
vm_state VARCHAR(255),
|
||||
memory_mb INTEGER,
|
||||
vcpus INTEGER,
|
||||
root_gb INTEGER,
|
||||
ephemeral_gb INTEGER,
|
||||
hostname VARCHAR(255),
|
||||
host VARCHAR(255),
|
||||
user_data TEXT,
|
||||
reservation_id VARCHAR(255),
|
||||
scheduled_at DATETIME,
|
||||
launched_at DATETIME,
|
||||
terminated_at DATETIME,
|
||||
display_name VARCHAR(255),
|
||||
display_description VARCHAR(255),
|
||||
availability_zone VARCHAR(255),
|
||||
locked BOOLEAN,
|
||||
os_type VARCHAR(255),
|
||||
launched_on TEXT,
|
||||
instance_type_id INTEGER,
|
||||
vm_mode VARCHAR(255),
|
||||
uuid VARCHAR(36),
|
||||
architecture VARCHAR(255),
|
||||
root_device_name VARCHAR(255),
|
||||
access_ip_v4 VARCHAR(255),
|
||||
access_ip_v6 VARCHAR(255),
|
||||
config_drive VARCHAR(255),
|
||||
task_state VARCHAR(255),
|
||||
default_ephemeral_device VARCHAR(255),
|
||||
default_swap_device VARCHAR(255),
|
||||
progress INTEGER,
|
||||
auto_disk_config BOOLEAN,
|
||||
shutdown_terminate BOOLEAN,
|
||||
disable_terminate BOOLEAN,
|
||||
PRIMARY KEY (id),
|
||||
CHECK (deleted IN (0, 1)),
|
||||
CHECK (locked IN (0, 1)),
|
||||
CHECK (auto_disk_config IN (0, 1)),
|
||||
CHECK (shutdown_terminate IN (0, 1)),
|
||||
CHECK (disable_terminate IN (0, 1))
|
||||
);
|
||||
|
||||
CREATE INDEX project_id ON instances (project_id);
|
||||
CREATE UNIQUE INDEX uuid ON instances (uuid);
|
||||
|
||||
-- copy from backup to new table with root_gb set to local_gb and
|
||||
-- ephemeral_gb set to 0
|
||||
INSERT INTO instances
|
||||
SELECT created_at,
|
||||
updated_at,
|
||||
deleted_at,
|
||||
deleted,
|
||||
id,
|
||||
internal_id,
|
||||
user_id,
|
||||
project_id,
|
||||
image_ref,
|
||||
kernel_id,
|
||||
ramdisk_id,
|
||||
server_name,
|
||||
launch_index,
|
||||
key_name,
|
||||
key_data,
|
||||
power_state,
|
||||
vm_state,
|
||||
memory_mb,
|
||||
vcpus,
|
||||
local_gb,
|
||||
0,
|
||||
hostname,
|
||||
host,
|
||||
user_data,
|
||||
reservation_id,
|
||||
scheduled_at,
|
||||
launched_at,
|
||||
terminated_at,
|
||||
display_name,
|
||||
display_description,
|
||||
availability_zone,
|
||||
locked,
|
||||
os_type,
|
||||
launched_on,
|
||||
instance_type_id,
|
||||
vm_mode,
|
||||
uuid,
|
||||
architecture,
|
||||
root_device_name,
|
||||
access_ip_v4,
|
||||
access_ip_v6,
|
||||
config_drive,
|
||||
task_state,
|
||||
default_local_device,
|
||||
default_swap_device,
|
||||
progress,
|
||||
auto_disk_config,
|
||||
shutdown_terminate,
|
||||
disable_terminate
|
||||
FROM instances_backup;
|
||||
|
||||
DROP TABLE instances_backup;
|
||||
COMMIT;
|
|
@ -223,7 +223,8 @@ class Instance(BASE, NovaBase):
|
|||
|
||||
memory_mb = Column(Integer)
|
||||
vcpus = Column(Integer)
|
||||
local_gb = Column(Integer)
|
||||
root_gb = Column(Integer)
|
||||
ephemeral_gb = Column(Integer)
|
||||
|
||||
hostname = Column(String(255))
|
||||
host = Column(String(255)) # , ForeignKey('hosts.id'))
|
||||
|
@ -256,7 +257,7 @@ class Instance(BASE, NovaBase):
|
|||
uuid = Column(String(36))
|
||||
|
||||
root_device_name = Column(String(255))
|
||||
default_local_device = Column(String(255), nullable=True)
|
||||
default_ephemeral_device = Column(String(255), nullable=True)
|
||||
default_swap_device = Column(String(255), nullable=True)
|
||||
config_drive = Column(String(255))
|
||||
|
||||
|
@ -339,7 +340,8 @@ class InstanceTypes(BASE, NovaBase):
|
|||
name = Column(String(255), unique=True)
|
||||
memory_mb = Column(Integer)
|
||||
vcpus = Column(Integer)
|
||||
local_gb = Column(Integer)
|
||||
root_gb = Column(Integer)
|
||||
ephemeral_gb = Column(Integer)
|
||||
flavorid = Column(String(255), unique=True)
|
||||
swap = Column(Integer, nullable=False, default=0)
|
||||
rxtx_factor = Column(Float, nullable=False, default=1)
|
||||
|
|
|
@ -261,7 +261,7 @@ DEFINE_string('my_ip', _get_my_ip(), 'host ip address')
|
|||
DEFINE_list('region_list',
|
||||
[],
|
||||
'list of region=fqdn pairs separated by commas')
|
||||
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
|
||||
DEFINE_string('connection_type', None, 'libvirt, xenapi or fake')
|
||||
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
|
||||
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
|
||||
# NOTE(sirp): my_ip interpolation doesn't work within nested structures
|
||||
|
@ -428,6 +428,10 @@ DEFINE_bool('start_guests_on_host_boot', False,
|
|||
'Whether to restart guests when the host reboots')
|
||||
DEFINE_bool('resume_guests_state_on_host_boot', False,
|
||||
'Whether to start guests, that was running before the host reboot')
|
||||
DEFINE_string('default_ephemeral_format',
|
||||
None,
|
||||
'The default format a ephemeral_volume will be formatted '
|
||||
'with on creation.')
|
||||
|
||||
DEFINE_string('root_helper', 'sudo',
|
||||
'Command prefix to use for running commands as root')
|
||||
|
|
|
@ -115,7 +115,7 @@ class HostState(object):
|
|||
|
||||
def consume_from_instance(self, instance):
|
||||
"""Update information about a host from instance info."""
|
||||
disk_mb = instance['local_gb'] * 1024
|
||||
disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
|
||||
ram_mb = instance['memory_mb']
|
||||
vcpus = instance['vcpus']
|
||||
self.free_ram_mb -= ram_mb
|
||||
|
|
|
@ -169,17 +169,21 @@ class SchedulerManager(manager.Manager):
|
|||
project_ids = [i['project_id'] for i in instance_refs]
|
||||
project_ids = list(set(project_ids))
|
||||
for project_id in project_ids:
|
||||
vcpus = [i['vcpus'] for i in instance_refs \
|
||||
if i['project_id'] == project_id]
|
||||
vcpus = [i['vcpus'] for i in instance_refs
|
||||
if i['project_id'] == project_id]
|
||||
|
||||
mem = [i['memory_mb'] for i in instance_refs \
|
||||
if i['project_id'] == project_id]
|
||||
mem = [i['memory_mb'] for i in instance_refs
|
||||
if i['project_id'] == project_id]
|
||||
|
||||
disk = [i['local_gb'] for i in instance_refs \
|
||||
if i['project_id'] == project_id]
|
||||
root = [i['root_gb'] for i in instance_refs
|
||||
if i['project_id'] == project_id]
|
||||
|
||||
usage[project_id] = {'vcpus': reduce(lambda x, y: x + y, vcpus),
|
||||
'memory_mb': reduce(lambda x, y: x + y, mem),
|
||||
'local_gb': reduce(lambda x, y: x + y, disk)}
|
||||
ephemeral = [i['ephemeral_gb'] for i in instance_refs
|
||||
if i['project_id'] == project_id]
|
||||
|
||||
usage[project_id] = {'vcpus': sum(vcpus),
|
||||
'memory_mb': sum(mem),
|
||||
'root_gb': sum(root),
|
||||
'ephemeral_gb': sum(ephemeral)}
|
||||
|
||||
return {'resource': resource, 'usage': usage}
|
||||
|
|
|
@ -123,7 +123,7 @@ class AdminTestCase(test.TestCase):
|
|||
inst = {'name': 'this_inst',
|
||||
'memory_mb': 1024,
|
||||
'vcpus': 2,
|
||||
'local_gb': 500,
|
||||
'root_gb': 500,
|
||||
'flavorid': 1}
|
||||
|
||||
expected_inst_dict = {'name': 'this_inst',
|
||||
|
|
|
@ -34,7 +34,8 @@ FLAGS = flags.FLAGS
|
|||
SERVERS = 5
|
||||
TENANTS = 2
|
||||
HOURS = 24
|
||||
LOCAL_GB = 10
|
||||
ROOT_GB = 10
|
||||
EPHEMERAL_GB = 20
|
||||
MEMORY_MB = 1024
|
||||
VCPUS = 2
|
||||
STOP = datetime.datetime.utcnow()
|
||||
|
@ -44,7 +45,8 @@ START = STOP - datetime.timedelta(hours=HOURS)
|
|||
def fake_instance_type_get(self, context, instance_type_id):
|
||||
return {'id': 1,
|
||||
'vcpus': VCPUS,
|
||||
'local_gb': LOCAL_GB,
|
||||
'root_gb': ROOT_GB,
|
||||
'ephemeral_gb': EPHEMERAL_GB,
|
||||
'memory_mb': MEMORY_MB,
|
||||
'name':
|
||||
'fakeflavor'}
|
||||
|
@ -107,7 +109,7 @@ class SimpleTenantUsageTest(test.TestCase):
|
|||
self.assertEqual(int(usages[i]['total_hours']),
|
||||
SERVERS * HOURS)
|
||||
self.assertEqual(int(usages[i]['total_local_gb_usage']),
|
||||
SERVERS * LOCAL_GB * HOURS)
|
||||
SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
|
||||
self.assertEqual(int(usages[i]['total_memory_mb_usage']),
|
||||
SERVERS * MEMORY_MB * HOURS)
|
||||
self.assertEqual(int(usages[i]['total_vcpus_usage']),
|
||||
|
|
|
@ -101,7 +101,7 @@ def stub_instance(id, user_id='fake', project_id='fake', host=None,
|
|||
"task_state": task_state,
|
||||
"memory_mb": 0,
|
||||
"vcpus": 0,
|
||||
"local_gb": 0,
|
||||
"root_gb": 0,
|
||||
"hostname": "",
|
||||
"host": host,
|
||||
"instance_type": {},
|
||||
|
|
|
@ -38,13 +38,13 @@ FAKE_FLAVORS = {
|
|||
"flavorid": '1',
|
||||
"name": 'flavor 1',
|
||||
"memory_mb": '256',
|
||||
"local_gb": '10'
|
||||
"root_gb": '10'
|
||||
},
|
||||
'flavor 2': {
|
||||
"flavorid": '2',
|
||||
"name": 'flavor 2',
|
||||
"memory_mb": '512',
|
||||
"local_gb": '20'
|
||||
"root_gb": '20'
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -63,7 +63,7 @@ def fake_instance_type_get_all(inactive=False, filters=None):
|
|||
for (flavor_name, flavor) in FAKE_FLAVORS.items():
|
||||
if reject_min('memory_mb', 'min_memory_mb'):
|
||||
continue
|
||||
elif reject_min('local_gb', 'min_local_gb'):
|
||||
elif reject_min('root_gb', 'min_root_gb'):
|
||||
continue
|
||||
|
||||
output[flavor_name] = flavor
|
||||
|
|
|
@ -100,7 +100,7 @@ def stub_instance(id, metadata=None, image_ref="10", flavor_id="1",
|
|||
"task_state": task_state,
|
||||
"memory_mb": 0,
|
||||
"vcpus": 0,
|
||||
"local_gb": 0,
|
||||
"root_gb": 0,
|
||||
"hostname": "",
|
||||
"host": "fake_host",
|
||||
"instance_type": dict(inst_type),
|
||||
|
|
|
@ -514,7 +514,8 @@ def stub_instance(id, user_id='fake', project_id='fake', host=None,
|
|||
"power_state": power_state,
|
||||
"memory_mb": 0,
|
||||
"vcpus": 0,
|
||||
"local_gb": 0,
|
||||
"root_gb": 0,
|
||||
"ephemeral_gb": 0,
|
||||
"hostname": "",
|
||||
"host": host,
|
||||
"instance_type": dict(inst_type),
|
||||
|
|
|
@ -349,29 +349,33 @@ def stub_out_db_instance_api(stubs, injected=True):
|
|||
'm1.tiny': dict(id=2,
|
||||
memory_mb=512,
|
||||
vcpus=1,
|
||||
local_gb=0,
|
||||
root_gb=0,
|
||||
ephemeral_gb=10,
|
||||
flavorid=1,
|
||||
rxtx_cap=1,
|
||||
swap=0),
|
||||
'm1.small': dict(id=5,
|
||||
memory_mb=2048,
|
||||
vcpus=1,
|
||||
local_gb=20,
|
||||
root_gb=20,
|
||||
ephemeral_gb=0,
|
||||
flavorid=2,
|
||||
rxtx_cap=2,
|
||||
swap=0),
|
||||
swap=1024),
|
||||
'm1.medium':
|
||||
dict(id=1,
|
||||
memory_mb=4096,
|
||||
vcpus=2,
|
||||
local_gb=40,
|
||||
root_gb=40,
|
||||
ephemeral_gb=40,
|
||||
flavorid=3,
|
||||
rxtx_cap=3,
|
||||
swap=0),
|
||||
'm1.large': dict(id=3,
|
||||
memory_mb=8192,
|
||||
vcpus=4,
|
||||
local_gb=80,
|
||||
root_gb=80,
|
||||
ephemeral_gb=80,
|
||||
flavorid=4,
|
||||
rxtx_cap=4,
|
||||
swap=0),
|
||||
|
@ -379,7 +383,8 @@ def stub_out_db_instance_api(stubs, injected=True):
|
|||
dict(id=4,
|
||||
memory_mb=16384,
|
||||
vcpus=8,
|
||||
local_gb=160,
|
||||
root_gb=160,
|
||||
ephemeral_gb=160,
|
||||
flavorid=5,
|
||||
rxtx_cap=5,
|
||||
swap=0)}
|
||||
|
|
|
@ -155,7 +155,7 @@ flavor = {'id': 0,
|
|||
'name': 'fake_flavor',
|
||||
'memory_mb': 2048,
|
||||
'vcpus': 2,
|
||||
'local_gb': 10,
|
||||
'root_gb': 10,
|
||||
'flavor_id': 0,
|
||||
'swap': 0,
|
||||
'rxtx_factor': 3}
|
||||
|
|
|
@ -40,14 +40,20 @@ COMPUTE_NODES = [
|
|||
]
|
||||
|
||||
INSTANCES = [
|
||||
dict(local_gb=512, memory_mb=512, vcpus=1, host='host1'),
|
||||
dict(local_gb=512, memory_mb=512, vcpus=1, host='host2'),
|
||||
dict(local_gb=512, memory_mb=512, vcpus=1, host='host2'),
|
||||
dict(local_gb=1024, memory_mb=1024, vcpus=1, host='host3'),
|
||||
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
|
||||
host='host1'),
|
||||
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
|
||||
host='host2'),
|
||||
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
|
||||
host='host2'),
|
||||
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
|
||||
host='host3'),
|
||||
# Broken host
|
||||
dict(local_gb=1024, memory_mb=1024, vcpus=1, host=None),
|
||||
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
|
||||
host=None),
|
||||
# No matching host
|
||||
dict(local_gb=1024, memory_mb=1024, vcpus=1, host='host5'),
|
||||
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
|
||||
host='host5'),
|
||||
]
|
||||
|
||||
|
||||
|
|
|
@ -104,7 +104,8 @@ class DistributedSchedulerTestCase(test.TestCase):
|
|||
self.stubs.Set(db, 'zone_get_all', fake_zone_get_all)
|
||||
|
||||
fake_context = context.RequestContext('user', 'project')
|
||||
request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
|
||||
request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
|
||||
'ephemeral_gb': 0},
|
||||
'instance_properties': {'project_id': 1}}
|
||||
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
|
||||
fake_context, request_spec)
|
||||
|
@ -219,7 +220,8 @@ class DistributedSchedulerTestCase(test.TestCase):
|
|||
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
|
||||
|
||||
request_spec = {'num_instances': 10,
|
||||
'instance_type': {'memory_mb': 512, 'local_gb': 512},
|
||||
'instance_type': {'memory_mb': 512, 'root_gb': 512,
|
||||
'ephemeral_gb': 0},
|
||||
'instance_properties': {'project_id': 1}}
|
||||
self.mox.ReplayAll()
|
||||
weighted_hosts = sched._schedule(fake_context, 'compute',
|
||||
|
@ -260,10 +262,12 @@ class DistributedSchedulerTestCase(test.TestCase):
|
|||
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
|
||||
|
||||
request_spec = {'num_instances': 10,
|
||||
'instance_type': {'memory_mb': 512, 'local_gb': 512},
|
||||
'instance_type': {'memory_mb': 512, 'root_gb': 512,
|
||||
'ephemeral_gb': 256},
|
||||
'instance_properties': {'project_id': 1,
|
||||
'memory_mb': 512,
|
||||
'local_gb': 512,
|
||||
'root_gb': 512,
|
||||
'ephemeral_gb': 0,
|
||||
'vcpus': 1}}
|
||||
filter_properties = {'local_zone_only': True}
|
||||
self.mox.ReplayAll()
|
||||
|
|
|
@ -267,7 +267,8 @@ class HostFiltersTestCase(test.TestCase):
|
|||
def test_json_filter_passes(self):
|
||||
filt_cls = filters.JsonFilter()
|
||||
filter_properties = {'instance_type': {'memory_mb': 1024,
|
||||
'local_gb': 200},
|
||||
'root_gb': 200,
|
||||
'ephemeral_gb': 0},
|
||||
'query': self.json_query}
|
||||
capabilities = {'enabled': True}
|
||||
host = fakes.FakeHostState('host1', 'compute',
|
||||
|
@ -279,7 +280,8 @@ class HostFiltersTestCase(test.TestCase):
|
|||
def test_json_filter_passes_with_no_query(self):
|
||||
filt_cls = filters.JsonFilter()
|
||||
filter_properties = {'instance_type': {'memory_mb': 1024,
|
||||
'local_gb': 200}}
|
||||
'root_gb': 200,
|
||||
'ephemeral_gb': 0}}
|
||||
capabilities = {'enabled': True}
|
||||
host = fakes.FakeHostState('host1', 'compute',
|
||||
{'free_ram_mb': 0,
|
||||
|
@ -290,7 +292,8 @@ class HostFiltersTestCase(test.TestCase):
|
|||
def test_json_filter_fails_on_memory(self):
|
||||
filt_cls = filters.JsonFilter()
|
||||
filter_properties = {'instance_type': {'memory_mb': 1024,
|
||||
'local_gb': 200},
|
||||
'root_gb': 200,
|
||||
'ephemeral_gb': 0},
|
||||
'query': self.json_query}
|
||||
capabilities = {'enabled': True}
|
||||
host = fakes.FakeHostState('host1', 'compute',
|
||||
|
@ -302,7 +305,8 @@ class HostFiltersTestCase(test.TestCase):
|
|||
def test_json_filter_fails_on_disk(self):
|
||||
filt_cls = filters.JsonFilter()
|
||||
filter_properties = {'instance_type': {'memory_mb': 1024,
|
||||
'local_gb': 200},
|
||||
'root_gb': 200,
|
||||
'ephemeral_gb': 0},
|
||||
'query': self.json_query}
|
||||
capabilities = {'enabled': True}
|
||||
host = fakes.FakeHostState('host1', 'compute',
|
||||
|
@ -318,7 +322,8 @@ class HostFiltersTestCase(test.TestCase):
|
|||
['>=', '$free_disk_mb', 200 * 1024],
|
||||
'$capabilities.enabled'])
|
||||
filter_properties = {'instance_type': {'memory_mb': 1024,
|
||||
'local_gb': 200},
|
||||
'root_gb': 200,
|
||||
'ephemeral_gb': 0},
|
||||
'query': json_query}
|
||||
capabilities = {'enabled': False}
|
||||
host = fakes.FakeHostState('host1', 'compute',
|
||||
|
|
|
@ -167,15 +167,18 @@ class SchedulerManagerTestCase(test.TestCase):
|
|||
instances = [{'project_id': 'project1',
|
||||
'vcpus': 1,
|
||||
'memory_mb': 128,
|
||||
'local_gb': 128},
|
||||
'root_gb': 128,
|
||||
'ephemeral_gb': 0},
|
||||
{'project_id': 'project1',
|
||||
'vcpus': 2,
|
||||
'memory_mb': 256,
|
||||
'local_gb': 384},
|
||||
'root_gb': 384,
|
||||
'ephemeral_gb': 0},
|
||||
{'project_id': 'project2',
|
||||
'vcpus': 2,
|
||||
'memory_mb': 256,
|
||||
'local_gb': 256}]
|
||||
'root_gb': 256,
|
||||
'ephemeral_gb': 0}]
|
||||
|
||||
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
|
||||
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
|
||||
|
@ -188,15 +191,17 @@ class SchedulerManagerTestCase(test.TestCase):
|
|||
result = self.manager.show_host_resources(self.context, host)
|
||||
expected = {'usage': {'project1': {'memory_mb': 384,
|
||||
'vcpus': 3,
|
||||
'local_gb': 512},
|
||||
'root_gb': 512,
|
||||
'ephemeral_gb': 0},
|
||||
'project2': {'memory_mb': 256,
|
||||
'vcpus': 2,
|
||||
'local_gb': 256}},
|
||||
'resource': {'vcpus_used': 2,
|
||||
'root_gb': 256,
|
||||
'ephemeral_gb': 0}},
|
||||
'resource': {'vcpus': 4,
|
||||
'vcpus_used': 2,
|
||||
'local_gb': 1024,
|
||||
'local_gb_used': 512,
|
||||
'memory_mb': 1024,
|
||||
'vcpus': 4,
|
||||
'local_gb': 1024,
|
||||
'memory_mb_used': 512}}
|
||||
self.assertDictMatch(result, expected)
|
||||
|
||||
|
@ -358,7 +363,8 @@ class SchedulerTestCase(test.TestCase):
|
|||
'volumes': [volume1, volume2],
|
||||
'power_state': power_state.RUNNING,
|
||||
'memory_mb': 1024,
|
||||
'local_gb': 1024}
|
||||
'root_gb': 1024,
|
||||
'ephemeral_gb': 0}
|
||||
|
||||
def test_live_migration_basic(self):
|
||||
"""Test basic schedule_live_migration functionality"""
|
||||
|
|
|
@ -151,6 +151,8 @@ class BaseTestCase(test.TestCase):
|
|||
type_id = instance_types.get_instance_type_by_name(type_name)['id']
|
||||
inst['instance_type_id'] = type_id
|
||||
inst['ami_launch_index'] = 0
|
||||
inst['root_gb'] = 0
|
||||
inst['ephemeral_gb'] = 0
|
||||
inst.update(params)
|
||||
return db.instance_create(self.context, inst)
|
||||
|
||||
|
@ -168,7 +170,8 @@ class BaseTestCase(test.TestCase):
|
|||
inst['name'] = 'm1.small'
|
||||
inst['memory_mb'] = '1024'
|
||||
inst['vcpus'] = '1'
|
||||
inst['local_gb'] = '20'
|
||||
inst['root_gb'] = '20'
|
||||
inst['ephemeral_gb'] = '10'
|
||||
inst['flavorid'] = '1'
|
||||
inst['swap'] = '2048'
|
||||
inst['rxtx_factor'] = 1
|
||||
|
@ -1458,7 +1461,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
"""Test an instance type with too little disk space"""
|
||||
|
||||
inst_type = instance_types.get_default_instance_type()
|
||||
inst_type['local_gb'] = 1
|
||||
inst_type['root_gb'] = 1
|
||||
|
||||
def fake_show(*args):
|
||||
img = copy(self.fake_image)
|
||||
|
@ -1470,7 +1473,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
self.compute_api.create, self.context, inst_type, None)
|
||||
|
||||
# Now increase the inst_type disk space and make sure all is fine.
|
||||
inst_type['local_gb'] = 2
|
||||
inst_type['root_gb'] = 2
|
||||
(refs, resv_id) = self.compute_api.create(self.context,
|
||||
inst_type, None)
|
||||
db.instance_destroy(self.context, refs[0]['id'])
|
||||
|
@ -1479,7 +1482,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
"""Test an instance type with just enough ram and disk space"""
|
||||
|
||||
inst_type = instance_types.get_default_instance_type()
|
||||
inst_type['local_gb'] = 2
|
||||
inst_type['root_gb'] = 2
|
||||
inst_type['memory_mb'] = 2
|
||||
|
||||
def fake_show(*args):
|
||||
|
@ -1497,7 +1500,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
"""Test an instance type with no min_ram or min_disk"""
|
||||
|
||||
inst_type = instance_types.get_default_instance_type()
|
||||
inst_type['local_gb'] = 1
|
||||
inst_type['root_gb'] = 1
|
||||
inst_type['memory_mb'] = 1
|
||||
|
||||
def fake_show(*args):
|
||||
|
@ -1951,7 +1954,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
|
||||
|
||||
instance = self._create_fake_instance()
|
||||
inst_params = {'local_gb': 2, 'memory_mb': 256}
|
||||
inst_params = {'root_gb': 2, 'memory_mb': 256}
|
||||
instance['instance_type'].update(inst_params)
|
||||
|
||||
image = self.compute_api.snapshot(self.context, instance, 'snap1',
|
||||
|
@ -2777,12 +2780,12 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
self.compute.terminate_instance(self.context, instance['uuid'])
|
||||
|
||||
def test_volume_size(self):
|
||||
local_size = 2
|
||||
ephemeral_size = 2
|
||||
swap_size = 3
|
||||
inst_type = {'local_gb': local_size, 'swap': swap_size}
|
||||
inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size}
|
||||
self.assertEqual(self.compute_api._volume_size(inst_type,
|
||||
'ephemeral0'),
|
||||
local_size)
|
||||
'ephemeral0'),
|
||||
ephemeral_size)
|
||||
self.assertEqual(self.compute_api._volume_size(inst_type,
|
||||
'ephemeral1'),
|
||||
0)
|
||||
|
|
|
@ -66,6 +66,8 @@ class UsageInfoTestCase(test.TestCase):
|
|||
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
|
||||
inst['instance_type_id'] = type_id
|
||||
inst['ami_launch_index'] = 0
|
||||
inst['root_gb'] = 0
|
||||
inst['ephemeral_gb'] = 0
|
||||
inst.update(params)
|
||||
return db.instance_create(self.context, inst)['id']
|
||||
|
||||
|
|
|
@ -68,13 +68,14 @@ class InstanceTypeTestCase(test.TestCase):
|
|||
original_list = instance_types.get_all_types()
|
||||
|
||||
# create new type and make sure values stick
|
||||
inst_type = instance_types.create(name, 256, 1, 120, flavorid)
|
||||
inst_type = instance_types.create(name, 256, 1, 120, 100, flavorid)
|
||||
inst_type_id = inst_type['id']
|
||||
self.assertEqual(inst_type['flavorid'], flavorid)
|
||||
self.assertEqual(inst_type['name'], name)
|
||||
self.assertEqual(inst_type['memory_mb'], 256)
|
||||
self.assertEqual(inst_type['vcpus'], 1)
|
||||
self.assertEqual(inst_type['local_gb'], 120)
|
||||
self.assertEqual(inst_type['root_gb'], 120)
|
||||
self.assertEqual(inst_type['ephemeral_gb'], 100)
|
||||
self.assertEqual(inst_type['swap'], 0)
|
||||
self.assertEqual(inst_type['rxtx_factor'], 1)
|
||||
|
||||
|
@ -108,22 +109,23 @@ class InstanceTypeTestCase(test.TestCase):
|
|||
def test_invalid_create_args_should_fail(self):
|
||||
"""Ensures that instance type creation fails with invalid args"""
|
||||
invalid_sigs = [
|
||||
(('Zero memory', 0, 1, 10, 'flavor1'), {}),
|
||||
(('Negative memory', -256, 1, 10, 'flavor1'), {}),
|
||||
(('Non-integer memory', 'asdf', 1, 10, 'flavor1'), {}),
|
||||
(('Zero memory', 0, 1, 10, 20, 'flavor1'), {}),
|
||||
(('Negative memory', -256, 1, 10, 20, 'flavor1'), {}),
|
||||
(('Non-integer memory', 'asdf', 1, 10, 20, 'flavor1'), {}),
|
||||
|
||||
(('Zero vcpus', 256, 0, 10, 'flavor1'), {}),
|
||||
(('Negative vcpus', 256, -1, 10, 'flavor1'), {}),
|
||||
(('Non-integer vcpus', 256, 'a', 10, 'flavor1'), {}),
|
||||
(('Zero vcpus', 256, 0, 10, 20, 'flavor1'), {}),
|
||||
(('Negative vcpus', 256, -1, 10, 20, 'flavor1'), {}),
|
||||
(('Non-integer vcpus', 256, 'a', 10, 20, 'flavor1'), {}),
|
||||
|
||||
(('Negative storage', 256, 1, -1, 'flavor1'), {}),
|
||||
(('Non-integer storage', 256, 1, 'a', 'flavor1'), {}),
|
||||
(('Negative storage', 256, 1, -1, 20, 'flavor1'), {}),
|
||||
(('Non-integer storage', 256, 1, 'a', 20, 'flavor1'), {}),
|
||||
|
||||
(('Negative swap', 256, 1, 10, 'flavor1'), {'swap': -1}),
|
||||
(('Non-integer swap', 256, 1, 10, 'flavor1'), {'swap': -1}),
|
||||
(('Negative swap', 256, 1, 10, 20, 'flavor1'), {'swap': -1}),
|
||||
(('Non-integer swap', 256, 1, 10, 20, 'flavor1'), {'swap': -1}),
|
||||
|
||||
(('Negative rxtx_factor', 256, 1, 10, 'f1'), {'rxtx_factor': -1}),
|
||||
(('Non-integer rxtx_factor', 256, 1, 10, 'f1'),
|
||||
(('Negative rxtx_factor', 256, 1, 10, 20, 'f1'),
|
||||
{'rxtx_factor': -1}),
|
||||
(('Non-integer rxtx_factor', 256, 1, 10, 20, 'f1'),
|
||||
{'rxtx_factor': "d"}),
|
||||
]
|
||||
|
||||
|
@ -140,18 +142,18 @@ class InstanceTypeTestCase(test.TestCase):
|
|||
def test_duplicate_names_fail(self):
|
||||
"""Ensures that name duplicates raise ApiError"""
|
||||
name = 'some_name'
|
||||
instance_types.create(name, 256, 1, 120, 'flavor1')
|
||||
instance_types.create(name, 256, 1, 120, 200, 'flavor1')
|
||||
self.assertRaises(exception.ApiError,
|
||||
instance_types.create,
|
||||
name, "256", 1, 120, 'flavor2')
|
||||
name, "256", 1, 120, 200, 'flavor2')
|
||||
|
||||
def test_duplicate_flavorids_fail(self):
|
||||
"""Ensures that flavorid duplicates raise ApiError"""
|
||||
flavorid = 'flavor1'
|
||||
instance_types.create('name one', 256, 1, 120, flavorid)
|
||||
instance_types.create('name one', 256, 1, 120, 200, flavorid)
|
||||
self.assertRaises(exception.ApiError,
|
||||
instance_types.create,
|
||||
'name two', 256, 1, 120, flavorid)
|
||||
'name two', 256, 1, 120, 200, flavorid)
|
||||
|
||||
def test_will_not_destroy_with_no_name(self):
|
||||
"""Ensure destroy sad path of no name raises error"""
|
||||
|
@ -239,14 +241,14 @@ class InstanceTypeFilteringTest(test.TestCase):
|
|||
expected = ['m1.large', 'm1.medium', 'm1.small', 'm1.xlarge']
|
||||
self.assertFilterResults(filters, expected)
|
||||
|
||||
def test_min_local_gb_filter(self):
|
||||
def test_min_root_gb_filter(self):
|
||||
"""Exclude everything but large and xlarge which have >= 80 GB"""
|
||||
filters = dict(min_local_gb=80)
|
||||
filters = dict(min_root_gb=80)
|
||||
expected = ['m1.large', 'm1.xlarge']
|
||||
self.assertFilterResults(filters, expected)
|
||||
|
||||
def test_min_memory_mb_AND_local_gb_filter(self):
|
||||
def test_min_memory_mb_AND_root_gb_filter(self):
|
||||
"""Exclude everything but large and xlarge which have >= 80 GB"""
|
||||
filters = dict(min_memory_mb=16384, min_local_gb=80)
|
||||
filters = dict(min_memory_mb=16384, min_root_gb=80)
|
||||
expected = ['m1.xlarge']
|
||||
self.assertFilterResults(filters, expected)
|
||||
|
|
|
@ -29,7 +29,8 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
|
|||
values = dict(name="cg1.4xlarge",
|
||||
memory_mb=22000,
|
||||
vcpus=8,
|
||||
local_gb=1690,
|
||||
root_gb=1690,
|
||||
ephemeral_gb=2000,
|
||||
flavorid=105)
|
||||
specs = dict(cpu_arch="x86_64",
|
||||
cpu_model="Nehalem",
|
||||
|
|
|
@ -303,7 +303,8 @@ class LibvirtConnTestCase(test.TestCase):
|
|||
'project_id': 'fake',
|
||||
'bridge': 'br101',
|
||||
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
|
||||
'local_gb': 20,
|
||||
'root_gb': 10,
|
||||
'ephemeral_gb': 20,
|
||||
'instance_type_id': '5'} # m1.small
|
||||
|
||||
def create_fake_libvirt_mock(self, **kwargs):
|
||||
|
@ -1625,7 +1626,8 @@ class NWFilterTestCase(test.TestCase):
|
|||
inst['name'] = 'm1.small'
|
||||
inst['memory_mb'] = '1024'
|
||||
inst['vcpus'] = '1'
|
||||
inst['local_gb'] = '20'
|
||||
inst['root_gb'] = '10'
|
||||
inst['ephemeral_gb'] = '20'
|
||||
inst['flavorid'] = '1'
|
||||
inst['swap'] = '2048'
|
||||
inst['rxtx_factor'] = 1
|
||||
|
|
|
@ -91,13 +91,13 @@ class QuotaTestCase(test.TestCase):
|
|||
|
||||
def _get_instance_type(self, name):
|
||||
instance_types = {
|
||||
'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
|
||||
'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
|
||||
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
|
||||
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
|
||||
'm1.medium':
|
||||
dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
|
||||
'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
|
||||
dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
|
||||
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
|
||||
'm1.xlarge':
|
||||
dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
|
||||
dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
|
||||
return instance_types[name]
|
||||
|
||||
def test_quota_overrides(self):
|
||||
|
|
|
@ -107,7 +107,7 @@ class XenAPIVolumeTestCase(test.TestCase):
|
|||
'image_ref': 1,
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'local_gb': 20,
|
||||
'root_gb': 20,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'os_type': 'linux',
|
||||
'architecture': 'x86-64'}
|
||||
|
@ -414,7 +414,7 @@ class XenAPIVMTestCase(test.TestCase):
|
|||
'image_ref': image_ref,
|
||||
'kernel_id': kernel_id,
|
||||
'ramdisk_id': ramdisk_id,
|
||||
'local_gb': 20,
|
||||
'root_gb': 20,
|
||||
'instance_type_id': instance_type_id,
|
||||
'os_type': os_type,
|
||||
'hostname': hostname,
|
||||
|
@ -700,7 +700,7 @@ class XenAPIVMTestCase(test.TestCase):
|
|||
'image_ref': 1,
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'local_gb': 20,
|
||||
'root_gb': 20,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'os_type': 'linux',
|
||||
'architecture': 'x86-64'}
|
||||
|
@ -797,7 +797,7 @@ class XenAPIMigrateInstance(test.TestCase):
|
|||
'image_ref': 1,
|
||||
'kernel_id': None,
|
||||
'ramdisk_id': None,
|
||||
'local_gb': 5,
|
||||
'root_gb': 5,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'os_type': 'linux',
|
||||
'architecture': 'x86-64'}
|
||||
|
@ -874,7 +874,7 @@ class XenAPIMigrateInstance(test.TestCase):
|
|||
self.fake_finish_revert_migration_called = True
|
||||
|
||||
self.stubs.Set(stubs.FakeSessionForMigrationTests,
|
||||
"VDI_resize_online", fake_vdi_resize)
|
||||
"VDI_resize_online", fake_vdi_resize)
|
||||
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
|
||||
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
|
||||
fake_finish_revert_migration)
|
||||
|
@ -889,7 +889,7 @@ class XenAPIMigrateInstance(test.TestCase):
|
|||
'gateway_v6': 'dead:beef::1',
|
||||
'ip6s': [{'enabled': '1',
|
||||
'ip': 'dead:beef::dcad:beff:feef:0',
|
||||
'netmask': '64'}],
|
||||
'netmask': '64'}],
|
||||
'ips': [{'enabled': '1',
|
||||
'ip': '192.168.0.100',
|
||||
'netmask': '255.255.255.0'}],
|
||||
|
@ -917,9 +917,9 @@ class XenAPIMigrateInstance(test.TestCase):
|
|||
def fake_vdi_resize(*args, **kwargs):
|
||||
self.called = True
|
||||
|
||||
self.stubs.Set(stubs.FakeSessionForMigrationTests,
|
||||
"VDI_resize_online", fake_vdi_resize)
|
||||
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
|
||||
self.stubs.Set(stubs.FakeSessionForMigrationTests,
|
||||
"VDI_resize_online", fake_vdi_resize)
|
||||
|
||||
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
|
||||
stubs.stubout_loopingcall_start(self.stubs)
|
||||
|
@ -949,14 +949,14 @@ class XenAPIMigrateInstance(test.TestCase):
|
|||
tiny_type_id = \
|
||||
instance_types.get_instance_type_by_name('m1.tiny')['id']
|
||||
self.instance_values.update({'instance_type_id': tiny_type_id,
|
||||
'local_gb': 0})
|
||||
'root_gb': 0})
|
||||
instance = db.instance_create(self.context, self.instance_values)
|
||||
|
||||
def fake_vdi_resize(*args, **kwargs):
|
||||
raise Exception("This shouldn't be called")
|
||||
|
||||
self.stubs.Set(stubs.FakeSessionForMigrationTests,
|
||||
"VDI_resize_online", fake_vdi_resize)
|
||||
"VDI_resize_online", fake_vdi_resize)
|
||||
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
|
||||
stubs.stubout_loopingcall_start(self.stubs)
|
||||
conn = xenapi_conn.get_connection(False)
|
||||
|
@ -1157,7 +1157,7 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
|
|||
'image_ref': 1,
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'local_gb': 20,
|
||||
'root_gb': 20,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'os_type': 'linux',
|
||||
'architecture': 'x86-64'}
|
||||
|
@ -1226,6 +1226,87 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
|
|||
self.assertIsPartitionCalled(True)
|
||||
|
||||
|
||||
class XenAPIGenerateLocal(test.TestCase):
|
||||
"""Test generating of local disks, like swap and ephemeral"""
|
||||
def setUp(self):
|
||||
super(XenAPIGenerateLocal, self).setUp()
|
||||
self.stubs = stubout.StubOutForTesting()
|
||||
self.flags(target_host='127.0.0.1',
|
||||
xenapi_connection_url='test_url',
|
||||
xenapi_connection_password='test_pass',
|
||||
xenapi_generate_swap=True,
|
||||
firewall_driver='nova.virt.xenapi.firewall.'
|
||||
'Dom0IptablesFirewallDriver')
|
||||
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
|
||||
db_fakes.stub_out_db_instance_api(self.stubs)
|
||||
xenapi_fake.reset()
|
||||
self.conn = xenapi_conn.get_connection(False)
|
||||
|
||||
self.user_id = 'fake'
|
||||
self.project_id = 'fake'
|
||||
|
||||
self.instance_values = {'id': 1,
|
||||
'project_id': self.project_id,
|
||||
'user_id': self.user_id,
|
||||
'image_ref': 1,
|
||||
'kernel_id': 2,
|
||||
'ramdisk_id': 3,
|
||||
'root_gb': 20,
|
||||
'instance_type_id': '3', # m1.large
|
||||
'os_type': 'linux',
|
||||
'architecture': 'x86-64'}
|
||||
|
||||
self.context = context.RequestContext(self.user_id, self.project_id)
|
||||
|
||||
@classmethod
|
||||
def fake_create_vbd(cls, session, vm_ref, vdi_ref, userdevice,
|
||||
bootable=True):
|
||||
pass
|
||||
|
||||
self.stubs.Set(volume_utils.VolumeHelper,
|
||||
"create_vbd",
|
||||
fake_create_vbd)
|
||||
|
||||
def assertCalled(self, instance):
|
||||
disk_image_type = vm_utils.ImageType.DISK_VHD
|
||||
vm_ref = "blah"
|
||||
first_vdi_ref = "blah"
|
||||
vdis = ["blah"]
|
||||
|
||||
self.called = False
|
||||
self.conn._vmops._attach_disks(instance, disk_image_type,
|
||||
vm_ref, first_vdi_ref, vdis)
|
||||
self.assertTrue(self.called)
|
||||
|
||||
def test_generate_swap(self):
|
||||
"""Test swap disk generation."""
|
||||
instance = db.instance_create(self.context, self.instance_values)
|
||||
instance = db.instance_update(self.context, instance['id'],
|
||||
{'instance_type_id': 5})
|
||||
|
||||
@classmethod
|
||||
def fake_generate_swap(cls, *args, **kwargs):
|
||||
self.called = True
|
||||
self.stubs.Set(vm_utils.VMHelper, 'generate_swap',
|
||||
fake_generate_swap)
|
||||
|
||||
self.assertCalled(instance)
|
||||
|
||||
def test_generate_ephemeral(self):
|
||||
"""Test ephemeral disk generation."""
|
||||
instance = db.instance_create(self.context, self.instance_values)
|
||||
instance = db.instance_update(self.context, instance['id'],
|
||||
{'instance_type_id': 4})
|
||||
|
||||
@classmethod
|
||||
def fake_generate_ephemeral(cls, *args):
|
||||
self.called = True
|
||||
self.stubs.Set(vm_utils.VMHelper, 'generate_ephemeral',
|
||||
fake_generate_ephemeral)
|
||||
|
||||
self.assertCalled(instance)
|
||||
|
||||
|
||||
class XenAPIBWUsageTestCase(test.TestCase):
|
||||
def setUp(self):
|
||||
super(XenAPIBWUsageTestCase, self).setUp()
|
||||
|
|
|
@ -41,7 +41,8 @@ def get_test_instance_type(context=None):
|
|||
test_instance_type = {'name': 'kinda.big',
|
||||
'memory_mb': 2048,
|
||||
'vcpus': 4,
|
||||
'local_gb': 40,
|
||||
'root_gb': 40,
|
||||
'ephemeral_gb': 80,
|
||||
'swap': 1024}
|
||||
|
||||
instance_type_ref = nova.db.instance_type_create(context,
|
||||
|
@ -57,6 +58,7 @@ def get_test_instance(context=None):
|
|||
'basepath': '/some/path',
|
||||
'bridge_name': 'br100',
|
||||
'vcpus': 2,
|
||||
'root_gb': 10,
|
||||
'project_id': 'fake',
|
||||
'bridge': 'br101',
|
||||
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
|
||||
|
|
|
@ -31,13 +31,13 @@ def stub_out_db_instance_api(stubs):
|
|||
"""Stubs out the db API for creating Instances."""
|
||||
|
||||
INSTANCE_TYPES = {
|
||||
'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
|
||||
'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
|
||||
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
|
||||
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
|
||||
'm1.medium':
|
||||
dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
|
||||
'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
|
||||
dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
|
||||
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
|
||||
'm1.xlarge':
|
||||
dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
|
||||
dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
|
||||
|
||||
class FakeModel(object):
|
||||
"""Stubs out for model."""
|
||||
|
@ -76,7 +76,7 @@ def stub_out_db_instance_api(stubs):
|
|||
'memory_mb': type_data['memory_mb'],
|
||||
'vcpus': type_data['vcpus'],
|
||||
'mac_addresses': [{'address': values['mac_address']}],
|
||||
'local_gb': type_data['local_gb'],
|
||||
'root_gb': type_data['root_gb'],
|
||||
}
|
||||
return FakeModel(base_options)
|
||||
|
||||
|
|
|
@ -289,10 +289,15 @@ def stub_out_vm_methods(stubs):
|
|||
def fake_spawn_rescue(self, context, inst, network_info, image_meta):
|
||||
inst._rescue = False
|
||||
|
||||
@classmethod
|
||||
def fake_generate_ephemeral(cls, *args):
|
||||
pass
|
||||
|
||||
stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown)
|
||||
stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
|
||||
stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
|
||||
stubs.Set(vmops.VMOps, "spawn_rescue", fake_spawn_rescue)
|
||||
stubs.Set(vm_utils.VMHelper, 'generate_ephemeral', fake_generate_ephemeral)
|
||||
|
||||
|
||||
class FakeSessionForVolumeTests(fake.SessionBase):
|
||||
|
@ -383,6 +388,10 @@ def stub_out_migration_methods(stubs):
|
|||
def fake_reset_network(*args, **kwargs):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def fake_generate_ephemeral(cls, *args):
|
||||
pass
|
||||
|
||||
stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
|
||||
stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr)
|
||||
stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
|
||||
|
@ -392,3 +401,4 @@ def stub_out_migration_methods(stubs):
|
|||
stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path)
|
||||
stubs.Set(vmops.VMOps, 'reset_network', fake_reset_network)
|
||||
stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown)
|
||||
stubs.Set(vm_utils.VMHelper, 'generate_ephemeral', fake_generate_ephemeral)
|
||||
|
|
|
@ -409,7 +409,7 @@ def usage_from_instance(instance_ref, network_info=None, **kw):
|
|||
instance_type=instance_ref['instance_type']['name'],
|
||||
instance_type_id=instance_ref['instance_type_id'],
|
||||
memory_mb=instance_ref['memory_mb'],
|
||||
disk_gb=instance_ref['local_gb'],
|
||||
disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'],
|
||||
display_name=instance_ref['display_name'],
|
||||
created_at=str(instance_ref['created_at']),
|
||||
launched_at=str(instance_ref['launched_at']) \
|
||||
|
|
|
@ -39,8 +39,6 @@ from nova.virt.disk import nbd
|
|||
|
||||
LOG = logging.getLogger('nova.compute.disk')
|
||||
FLAGS = flags.FLAGS
|
||||
flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
|
||||
'minimum size in bytes of root partition')
|
||||
flags.DEFINE_string('injected_network_template',
|
||||
utils.abspath('virt/interfaces.template'),
|
||||
'Template file for injected network')
|
||||
|
|
|
@ -79,11 +79,11 @@
|
|||
<target dev='${root_device}' bus='${root_disk_bus}'/>
|
||||
</disk>
|
||||
#end if
|
||||
#if $getVar('local_device', False)
|
||||
#if $getVar('ephemeral_device', False)
|
||||
<disk type='file'>
|
||||
<driver type='${driver_type}'/>
|
||||
<source file='${basepath}/disk.local'/>
|
||||
<target dev='${local_device}' bus='${ephemeral_disk_bus}'/>
|
||||
<target dev='${ephemeral_device}' bus='${ephemeral_disk_bus}'/>
|
||||
</disk>
|
||||
#end if
|
||||
#for $eph in $ephemerals
|
||||
|
|
|
@ -131,10 +131,6 @@ flags.DEFINE_list('libvirt_volume_drivers',
|
|||
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
|
||||
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver'],
|
||||
'Libvirt handlers for remote volumes.')
|
||||
flags.DEFINE_string('default_local_format',
|
||||
None,
|
||||
'The default format a local_volume will be formatted with '
|
||||
'on creation.')
|
||||
flags.DEFINE_bool('libvirt_use_virtio_for_bridges',
|
||||
False,
|
||||
'Use virtio for bridge interfaces')
|
||||
|
@ -195,7 +191,7 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
else:
|
||||
self._disk_prefix = disk_prefix_map.get(FLAGS.libvirt_type, 'vd')
|
||||
self.default_root_device = self._disk_prefix + 'a'
|
||||
self.default_local_device = self._disk_prefix + 'b'
|
||||
self.default_ephemeral_device = self._disk_prefix + 'b'
|
||||
self.default_swap_device = self._disk_prefix + 'c'
|
||||
|
||||
@property
|
||||
|
@ -834,15 +830,15 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
"""Create a blank image of specified size"""
|
||||
|
||||
if not fs_format:
|
||||
fs_format = FLAGS.default_local_format
|
||||
fs_format = FLAGS.default_ephemeral_format
|
||||
|
||||
libvirt_utils.create_image('raw', target,
|
||||
'%d%c' % (local_size, unit))
|
||||
if fs_format:
|
||||
libvirt_utils.mkfs(fs_format, target)
|
||||
|
||||
def _create_ephemeral(self, target, local_size, fs_label, os_type):
|
||||
self._create_local(target, local_size)
|
||||
def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
|
||||
self._create_local(target, ephemeral_size)
|
||||
disk.mkfs(os_type, fs_label, target)
|
||||
|
||||
@staticmethod
|
||||
|
@ -901,13 +897,15 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
project_id=inst['project_id'])
|
||||
|
||||
root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
|
||||
size = FLAGS.minimum_root_size
|
||||
size = inst['root_gb'] * 1024 * 1024 * 1024
|
||||
|
||||
inst_type_id = inst['instance_type_id']
|
||||
inst_type = instance_types.get_instance_type(inst_type_id)
|
||||
if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
|
||||
size = None
|
||||
root_fname += "_sm"
|
||||
else:
|
||||
root_fname += "_%d" % inst['root_gb']
|
||||
|
||||
if not self._volume_in_mapping(self.default_root_device,
|
||||
block_device_info):
|
||||
|
@ -921,18 +919,18 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
project_id=inst['project_id'],
|
||||
size=size)
|
||||
|
||||
local_gb = inst['local_gb']
|
||||
if local_gb and not self._volume_in_mapping(
|
||||
self.default_local_device, block_device_info):
|
||||
ephemeral_gb = inst['ephemeral_gb']
|
||||
if ephemeral_gb and not self._volume_in_mapping(
|
||||
self.default_ephemeral_device, block_device_info):
|
||||
fn = functools.partial(self._create_ephemeral,
|
||||
fs_label='ephemeral0',
|
||||
os_type=inst.os_type)
|
||||
self._cache_image(fn=fn,
|
||||
target=basepath('disk.local'),
|
||||
fname="ephemeral_%s_%s_%s" %
|
||||
("0", local_gb, inst.os_type),
|
||||
("0", ephemeral_gb, inst.os_type),
|
||||
cow=FLAGS.use_cow_images,
|
||||
local_size=local_gb)
|
||||
ephemeral_size=ephemeral_gb)
|
||||
|
||||
for eph in driver.block_device_info_get_ephemerals(block_device_info):
|
||||
fn = functools.partial(self._create_ephemeral,
|
||||
|
@ -943,7 +941,7 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
fname="ephemeral_%s_%s_%s" %
|
||||
(eph['num'], eph['size'], inst.os_type),
|
||||
cow=FLAGS.use_cow_images,
|
||||
local_size=eph['size'])
|
||||
ephemeral_size=eph['size'])
|
||||
|
||||
swap_mb = 0
|
||||
|
||||
|
@ -1119,14 +1117,14 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
ebs_root = self._volume_in_mapping(self.default_root_device,
|
||||
block_device_info)
|
||||
|
||||
local_device = False
|
||||
if not (self._volume_in_mapping(self.default_local_device,
|
||||
ephemeral_device = False
|
||||
if not (self._volume_in_mapping(self.default_ephemeral_device,
|
||||
block_device_info) or
|
||||
0 in [eph['num'] for eph in
|
||||
driver.block_device_info_get_ephemerals(
|
||||
block_device_info)]):
|
||||
if instance['local_gb'] > 0:
|
||||
local_device = self.default_local_device
|
||||
if instance['ephemeral_gb'] > 0:
|
||||
ephemeral_device = self.default_ephemeral_device
|
||||
|
||||
ephemerals = []
|
||||
for eph in driver.block_device_info_get_ephemerals(block_device_info):
|
||||
|
@ -1147,7 +1145,7 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
'vif_type': FLAGS.libvirt_vif_type,
|
||||
'nics': nics,
|
||||
'ebs_root': ebs_root,
|
||||
'local_device': local_device,
|
||||
'ephemeral_device': ephemeral_device,
|
||||
'volumes': volumes,
|
||||
'use_virtio_for_bridges':
|
||||
FLAGS.libvirt_use_virtio_for_bridges,
|
||||
|
@ -1165,10 +1163,11 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
nova_context.get_admin_context(), instance['id'],
|
||||
{'root_device_name': '/dev/' + self.default_root_device})
|
||||
|
||||
if local_device:
|
||||
if ephemeral_device:
|
||||
db.instance_update(
|
||||
nova_context.get_admin_context(), instance['id'],
|
||||
{'default_local_device': '/dev/' + self.default_local_device})
|
||||
{'default_ephemeral_device':
|
||||
'/dev/' + self.default_ephemeral_device})
|
||||
|
||||
swap = driver.block_device_info_get_swap(block_device_info)
|
||||
if driver.swap_is_usable(swap):
|
||||
|
@ -1790,7 +1789,7 @@ class LibvirtConnection(driver.ComputeDriver):
|
|||
image_id=instance_ref['image_ref'],
|
||||
user_id=instance_ref['user_id'],
|
||||
project_id=instance_ref['project_id'],
|
||||
size=instance_ref['local_gb'])
|
||||
size=instance_ref['ephemeral_gb'])
|
||||
|
||||
libvirt_utils.create_cow_image(backing_file, instance_disk)
|
||||
|
||||
|
|
|
@ -393,7 +393,7 @@ class VMHelper(HelperBase):
|
|||
# Resize partition and filesystem down
|
||||
cls.auto_configure_disk(session=session,
|
||||
vdi_ref=copy_ref,
|
||||
new_gb=instance_type['local_gb'])
|
||||
new_gb=instance_type['root_gb'])
|
||||
|
||||
# Create new VDI
|
||||
new_ref = cls.fetch_blank_disk(session,
|
||||
|
@ -401,7 +401,7 @@ class VMHelper(HelperBase):
|
|||
new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
|
||||
|
||||
# Manually copy contents over
|
||||
virtual_size = instance_type['local_gb'] * 1024 * 1024 * 1024
|
||||
virtual_size = instance_type['root_gb'] * 1024 * 1024 * 1024
|
||||
_copy_partition(session, copy_ref, new_ref, 1, virtual_size)
|
||||
|
||||
return new_ref, new_uuid
|
||||
|
@ -411,7 +411,7 @@ class VMHelper(HelperBase):
|
|||
@classmethod
|
||||
def auto_configure_disk(cls, session, vdi_ref, new_gb):
|
||||
"""Partition and resize FS to match the size specified by
|
||||
instance_types.local_gb.
|
||||
instance_types.root_gb.
|
||||
|
||||
This is a fail-safe to prevent accidentally destroying data on a disk
|
||||
erroneously marked as auto_disk_config=True.
|
||||
|
@ -437,48 +437,52 @@ class VMHelper(HelperBase):
|
|||
_resize_part_and_fs(dev, start, old_sectors, new_sectors)
|
||||
|
||||
@classmethod
|
||||
def generate_swap(cls, session, instance, vm_ref, userdevice, swap_mb):
|
||||
def _generate_disk(cls, session, instance, vm_ref, userdevice, name,
|
||||
size_mb, fs_type):
|
||||
"""
|
||||
Steps to programmatically generate swap:
|
||||
Steps to programmatically generate a disk:
|
||||
|
||||
1. Create VDI of desired swap size
|
||||
1. Create VDI of desired size
|
||||
|
||||
2. Attach VDI to compute worker
|
||||
|
||||
3. Create swap partition
|
||||
3. Create partition
|
||||
|
||||
4. Create VBD between instance VM and swap VDI
|
||||
4. Create VBD between instance VM and VDI
|
||||
"""
|
||||
# 1. Create VDI
|
||||
sr_ref = cls.safe_find_sr(session)
|
||||
name_label = instance.name + "-swap"
|
||||
name_label = '%s-%s' % (instance.name, name)
|
||||
ONE_MEG = 1024 * 1024
|
||||
virtual_size = swap_mb * ONE_MEG
|
||||
virtual_size = size_mb * ONE_MEG
|
||||
vdi_ref = cls.create_vdi(
|
||||
session, sr_ref, name_label, virtual_size, read_only=False)
|
||||
|
||||
try:
|
||||
# 2. Attach VDI to compute worker (VBD hotplug)
|
||||
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
|
||||
# 3. Create swap partition
|
||||
|
||||
# NOTE(jk0): We use a FAT32 filesystem for the Windows swap
|
||||
# partition because that is what parted supports.
|
||||
is_windows = instance.os_type == "windows"
|
||||
fs_type = "fat32" if is_windows else "linux-swap"
|
||||
|
||||
# 3. Create partition
|
||||
dev_path = utils.make_dev_path(dev)
|
||||
utils.execute('parted', '--script', dev_path,
|
||||
'mklabel', 'msdos', run_as_root=True)
|
||||
|
||||
partition_start = 0
|
||||
partition_end = swap_mb
|
||||
utils.execute('parted', '--script', dev_path, 'mkpartfs',
|
||||
'primary', fs_type,
|
||||
partition_end = size_mb
|
||||
utils.execute('parted', '--script', dev_path,
|
||||
'mkpart', 'primary',
|
||||
str(partition_start),
|
||||
str(partition_end),
|
||||
run_as_root=True)
|
||||
|
||||
partition_path = utils.make_dev_path(dev, partition=1)
|
||||
|
||||
if fs_type == 'linux-swap':
|
||||
utils.execute('mkswap', partition_path,
|
||||
run_as_root=True)
|
||||
elif fs_type is not None:
|
||||
utils.execute('mkfs', '-t', fs_type, partition_path,
|
||||
run_as_root=True)
|
||||
|
||||
# 4. Create VBD between instance VM and swap VDI
|
||||
volume_utils.VolumeHelper.create_vbd(
|
||||
session, vm_ref, vdi_ref, userdevice, bootable=False)
|
||||
|
@ -486,12 +490,29 @@ class VMHelper(HelperBase):
|
|||
with utils.save_and_reraise_exception():
|
||||
cls.destroy_vdi(session, vdi_ref)
|
||||
|
||||
@classmethod
|
||||
def generate_swap(cls, session, instance, vm_ref, userdevice, swap_mb):
|
||||
# NOTE(jk0): We use a FAT32 filesystem for the Windows swap
|
||||
# partition because that is what parted supports.
|
||||
is_windows = instance.os_type == "windows"
|
||||
fs_type = "fat32" if is_windows else "linux-swap"
|
||||
|
||||
cls._generate_disk(session, instance, vm_ref, userdevice,
|
||||
'swap', swap_mb, fs_type)
|
||||
|
||||
@classmethod
|
||||
def generate_ephemeral(cls, session, instance, vm_ref, userdevice,
|
||||
size_gb):
|
||||
cls._generate_disk(session, instance, vm_ref, userdevice,
|
||||
'ephemeral', size_gb * 1024,
|
||||
FLAGS.default_ephemeral_format)
|
||||
|
||||
@classmethod
|
||||
def fetch_blank_disk(cls, session, instance_type_id):
|
||||
# Size the blank harddrive to suit the machine type:
|
||||
one_gig = 1024 * 1024 * 1024
|
||||
req_type = instance_types.get_instance_type(instance_type_id)
|
||||
req_size = req_type['local_gb']
|
||||
req_size = req_type['root_gb']
|
||||
|
||||
LOG.debug("Creating blank HD of size %(req_size)d gigs"
|
||||
% locals())
|
||||
|
@ -595,7 +616,7 @@ class VMHelper(HelperBase):
|
|||
# refactor this to a common area
|
||||
instance_type_id = instance['instance_type_id']
|
||||
instance_type = instance_types.get_instance_type(instance_type_id)
|
||||
allowed_size_gb = instance_type['local_gb']
|
||||
allowed_size_gb = instance_type['root_gb']
|
||||
allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
|
||||
|
||||
LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes="
|
||||
|
|
|
@ -371,7 +371,7 @@ class VMOps(object):
|
|||
instance.instance_type_id)
|
||||
VMHelper.auto_configure_disk(session=self._session,
|
||||
vdi_ref=first_vdi_ref,
|
||||
new_gb=instance_type['local_gb'])
|
||||
new_gb=instance_type['root_gb'])
|
||||
|
||||
VolumeHelper.create_vbd(session=self._session, vm_ref=vm_ref,
|
||||
vdi_ref=first_vdi_ref,
|
||||
|
@ -390,6 +390,13 @@ class VMOps(object):
|
|||
swap_mb=swap_mb)
|
||||
userdevice += 1
|
||||
|
||||
ephemeral_gb = instance_type['ephemeral_gb']
|
||||
if ephemeral_gb:
|
||||
VMHelper.generate_ephemeral(self._session, instance,
|
||||
vm_ref, userdevice,
|
||||
ephemeral_gb)
|
||||
userdevice += 1
|
||||
|
||||
# Attach any other disks
|
||||
for vdi in vdis[1:]:
|
||||
if generate_swap and vdi['vdi_type'] == 'swap':
|
||||
|
@ -703,10 +710,10 @@ class VMOps(object):
|
|||
sr_path = VMHelper.get_sr_path(self._session)
|
||||
|
||||
if instance['auto_disk_config'] and \
|
||||
instance['local_gb'] > instance_type['local_gb']:
|
||||
instance['root_gb'] > instance_type['root_gb']:
|
||||
# Resizing disk storage down
|
||||
old_gb = instance['local_gb']
|
||||
new_gb = instance_type['local_gb']
|
||||
old_gb = instance['root_gb']
|
||||
new_gb = instance_type['root_gb']
|
||||
|
||||
LOG.debug(_("Resizing down VDI %(cow_uuid)s from "
|
||||
"%(old_gb)dGB to %(new_gb)dGB") % locals())
|
||||
|
@ -816,7 +823,7 @@ class VMOps(object):
|
|||
"""Resize a running instance by changing its disk size."""
|
||||
#TODO(mdietz): this will need to be adjusted for swap later
|
||||
|
||||
new_disk_size = instance.local_gb * 1024 * 1024 * 1024
|
||||
new_disk_size = instance.root_gb * 1024 * 1024 * 1024
|
||||
if not new_disk_size:
|
||||
return
|
||||
|
||||
|
@ -828,7 +835,7 @@ class VMOps(object):
|
|||
|
||||
instance_name = instance.name
|
||||
old_gb = virtual_size / (1024 * 1024 * 1024)
|
||||
new_gb = instance.local_gb
|
||||
new_gb = instance.root_gb
|
||||
|
||||
if virtual_size < new_disk_size:
|
||||
# Resize up. Simple VDI resize will do the trick
|
||||
|
|
Loading…
Reference in New Issue