Merge "Add root disk partitions support"
commit 41cc9dd49f
@@ -550,6 +550,13 @@ nodes:
   in: body
   required: true
   type: object
+partitions:
+  description: |
+    The partitions info for the root disk. This is only allowed when using
+    partition images, with root_gb (required), ephemeral_gb, and swap_mb.
+  in: body
+  required: false
+  type: object
 personality:
   description: |
     The file path and contents, text only, to inject into the server at launch. The
@@ -16,6 +16,11 @@
     "metadata" : {
         "My Server Name" : "Apache1"
     },
+    "partitions" : {
+        "root_gb": 100,
+        "ephemeral_gb": 400,
+        "swap_mb": 40960
+    },
     "personality": [
         {
             "path": "/etc/banner.txt",

@@ -24,6 +24,11 @@
     "project_id": "2f15c3524826465a9afbd150478b3b76",
     "user_id": "a6205fcab03d4a289251f420456b1289",
     "addresses": {},
+    "partitions" : {
+        "root_gb": 100,
+        "ephemeral_gb": 400,
+        "swap_mb": 40960
+    },
     "metadata": {
         "My Server Name" : "Apache1"
     }
@@ -5,21 +5,11 @@
     "image_uuid": "ac3b2291-b9ef-45f6-8eeb-21ac568a64a5",
     "server_type_uuid": "28708dff-283c-449e-9bfa-a48c93480c86",
     "name": "test_server",
-    "network_info": {
-        "12cffc4a-b845-409e-b589-7c84be4b10d9": {
-            "fixed_ips": [
-                {
-                    "ip_address": "172.24.4.4",
-                    "subnet_id": "a9d47430-f90b-4513-af5f-6315af54de7d"
-                },
-                {
-                    "ip_address": "2001:db8::a",
-                    "subnet_id": "5e7b3e2d-f36f-4e30-874c-16c2d126fe53"
-                }
-            ],
-            "mac_address": "52:54:00:6c:c4:17",
-            "network": "ade2b658-929b-439f-9528-c47057960942"
-        }
-    },
     "addresses": {},
+    "partitions" : {
+        "root_gb": 100,
+        "ephemeral_gb": 400,
+        "swap_mb": 40960
+    },
     "power_state": "power on",
     "project_id": "c18e8a1a870d4c08a0b51ced6e0b6459",
@@ -23,6 +23,11 @@
             }
         ]
     },
+    "partitions" : {
+        "root_gb": 100,
+        "ephemeral_gb": 400,
+        "swap_mb": 40960
+    },
     "power_state": "power on",
     "project_id": "c18e8a1a870d4c08a0b51ced6e0b6459",
     "status": "building",

@@ -25,6 +25,11 @@
             }
         ]
     },
+    "partitions" : {
+        "root_gb": 100,
+        "ephemeral_gb": 400,
+        "swap_mb": 40960
+    },
     "power_state": "power on",
     "project_id": "c18e8a1a870d4c08a0b51ced6e0b6459",
     "status": "building",

@@ -31,6 +31,11 @@
             }
         ]
     },
+    "partitions" : {
+        "root_gb": 100,
+        "ephemeral_gb": 400,
+        "swap_mb": 40960
+    },
     "metadata": {
         "k1": "v1",
         "k2": "v2"
@@ -43,6 +43,7 @@ Request
   - user_data: user_data
   - personality: personality
   - key_name: key_name
+  - partitions: partitions
   - scheduler_hints: scheduler_hints

**Example Create Server: JSON request**

@@ -73,6 +74,7 @@ Response
  - metadata: metadata
  - affinity_zone: affinity_zone
  - key_name: key_name
+ - partitions: partitions

**Example Create Server: JSON response**

@@ -226,6 +228,7 @@ Response
  - metadata: metadata
  - affinity_zone: affinity_zone
  - key_name: key_name
+ - partitions: partitions

**Example Detailed list of Servers: JSON response**

@@ -280,6 +283,7 @@ Response
  - metadata: metadata
  - affinity_zone: affinity_zone
  - key_name: key_name
+ - partitions: partitions

**Example Server Details: JSON response**

@@ -342,6 +346,7 @@ Response
  - metadata: metadata
  - affinity_zone: affinity_zone
  - key_name: key_name
+ - partitions: partitions

**Example Update Server: JSON response**

@@ -48,11 +48,21 @@ create_server = {
         'min_count': {'type': 'integer', 'minimum': 1},
         'max_count': {'type': 'integer', 'minimum': 1},
         'metadata': parameter_types.metadata,
+        'partitions': {
+            'type': 'object',
+            'properties': {
+                'root_gb': parameter_types.positive_integer,
+                'ephemeral_gb': parameter_types.non_negative_integer,
+                'swap_mb': parameter_types.non_negative_integer,
+            },
+            'required': ['root_gb'],
+            'additionalProperties': False,
+        },
     },
     'required': ['name', 'image_uuid', 'flavor_uuid', 'networks'],
     'additionalProperties': False,
 },
 "scheduler_hints": {
     'scheduler_hints': {
         'type': 'object',
         'properties': {
             'group': parameter_types.server_group_id
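For illustration, the standalone sketch below (plain jsonschema, with the parameter_types entries expanded to the primitives defined later in this change) shows how the new partitions object behaves under validation: root_gb is required and unknown keys are rejected.

# Standalone sketch, not Mogan code: validate a 'partitions' payload against
# the schema fragment added above, using the jsonschema library.
import jsonschema

partitions_schema = {
    'type': 'object',
    'properties': {
        'root_gb': {'type': ['integer', 'string'], 'pattern': '^[0-9]*$',
                    'minimum': 1, 'minLength': 1},
        'ephemeral_gb': {'type': ['integer', 'string'], 'pattern': '^[0-9]*$',
                         'minimum': 0, 'minLength': 1},
        'swap_mb': {'type': ['integer', 'string'], 'pattern': '^[0-9]*$',
                    'minimum': 0, 'minLength': 1},
    },
    'required': ['root_gb'],
    'additionalProperties': False,
}

# Passes: all keys known, root_gb present and positive.
jsonschema.validate({'root_gb': 100, 'ephemeral_gb': 400, 'swap_mb': 40960},
                    partitions_schema)

try:
    # Fails: root_gb is a required property.
    jsonschema.validate({'ephemeral_gb': 400}, partitions_schema)
except jsonschema.ValidationError as e:
    print(e.message)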
@@ -508,8 +508,9 @@ class ServerPatchType(types.JsonPatchType):
         defaults = types.JsonPatchType.internal_attrs()
         return defaults + ['/project_id', '/user_id', '/status',
                            '/power_state', '/availability_zone',
-                           '/flavor_uuid', '/image_uuid',
-                           '/nics', '/launched_at', '/affinity_zone']
+                           '/flavor_uuid', '/image_uuid', '/addresses',
+                           '/launched_at', '/affinity_zone', '/key_name',
+                           '/partitions', '/fault', '/node']


 class ServerCollection(base.APIBase):
@@ -720,6 +721,7 @@ class ServerController(ServerControllerBase):
         image_uuid = server.get('image_uuid')
         user_data = server.get('user_data')
         key_name = server.get('key_name')
+        partitions = server.get('partitions')
         personality = server.pop('personality', None)

         injected_files = []

@@ -744,6 +746,7 @@
             key_name=key_name,
             min_count=min_count,
             max_count=max_count,
+            partitions=partitions,
             scheduler_hints=scheduler_hints)
         # Set the HTTP Location Header for the first server.
         pecan.response.location = link.build_url('server', servers[0].uuid)
@@ -20,7 +20,13 @@ Common parameter types for validating request Body.

 positive_integer = {
     'type': ['integer', 'string'],
-    'pattern': '^[0-9]*$', 'minimum': 1
+    'pattern': '^[0-9]*$', 'minimum': 1, 'minLength': 1
 }


+non_negative_integer = {
+    'type': ['integer', 'string'],
+    'pattern': '^[0-9]*$', 'minimum': 0, 'minLength': 1
+}
+
+
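Worth noting (a property of JSON Schema itself, not something this change adds): with 'type': ['integer', 'string'], the numeric keyword 'minimum' only constrains integer instances, while 'pattern' and 'minLength' only constrain string instances. A small standalone sketch with the jsonschema library:

# Illustration only: which keywords apply to which instance type when both
# integers and numeric strings are accepted.
import jsonschema

positive_integer = {
    'type': ['integer', 'string'],
    'pattern': '^[0-9]*$', 'minimum': 1, 'minLength': 1,
}

jsonschema.validate(5, positive_integer)    # integer: checked by 'minimum'
jsonschema.validate('5', positive_integer)  # string: checked by 'pattern'/'minLength'

try:
    jsonschema.validate(0, positive_integer)  # fails: 0 is below 'minimum' 1
except jsonschema.ValidationError as e:
    print(e.message)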
@@ -84,12 +84,13 @@ class BaseEngineDriver(object):
         """
         raise NotImplementedError()

-    def spawn(self, context, server, configdrive_value):
+    def spawn(self, context, server, configdrive_value, partitions):
         """Create a new server on the provision platform.

         :param context: security context
         :param server: moga server object.
-        :param configdrive_value: The configdrive value to be injected.
+        :param configdrive_value: configdrive value to be injected.
+        :param partitions: root disk partitions
         """
         raise NotImplementedError()

@@ -136,7 +136,8 @@ class IronicDriver(base_driver.BaseEngineDriver):
         }
         return dic

-    def _add_server_info_to_node(self, node, server, preserve_ephemeral=None):
+    def _add_server_info_to_node(self, node, server, preserve_ephemeral=None,
+                                 partitions=None):
         patch = list()
         # Associate the node with a server
         patch.append({'path': '/instance_uuid', 'op': 'add',
@@ -144,12 +145,20 @@
         # Add the required fields to deploy a node.
         patch.append({'path': '/instance_info/image_source', 'op': 'add',
                       'value': server.image_uuid})
-        # TODO(zhenguo) Add partition support
-        patch.append({'path': '/instance_info/root_gb', 'op': 'add',
-                      'value': str(node.properties.get('local_gb', 0))})
         if preserve_ephemeral is not None:
             patch.append({'path': '/instance_info/preserve_ephemeral',
                           'op': 'add', 'value': str(preserve_ephemeral)})
+        if partitions:
+            patch.append({'path': '/instance_info/root_gb', 'op': 'add',
+                          'value': str(partitions.get('root_gb', 0))})
+            patch.append({'path': '/instance_info/ephemeral_gb', 'op': 'add',
+                          'value': str(partitions.get('ephemeral_gb', 0))})
+            patch.append({'path': '/instance_info/swap_mb', 'op': 'add',
+                          'value': str(partitions.get('swap_mb', 0))})
+            # Local boot support with partition images; the image **must**
+            # have ``grub2`` installed within it.
+            patch.append({'path': '/instance_info/capabilities',
+                          'op': 'add', 'value': '{"boot_option": "local"}'})

         try:
             # FIXME(lucasagomes): The "retry_on_conflict" parameter was added
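For a concrete picture of what the hunk above produces, the standalone sketch below builds the same instance_info JSON-patch entries for partitions = {'root_gb': 100, 'ephemeral_gb': 400, 'swap_mb': 40960}; it is an illustration, not the driver code itself. Values are passed as strings, which is what the str() calls above do.

# Standalone illustration of the instance_info patch built for a partition image.
partitions = {'root_gb': 100, 'ephemeral_gb': 400, 'swap_mb': 40960}

patch = []
if partitions:
    for key in ('root_gb', 'ephemeral_gb', 'swap_mb'):
        patch.append({'path': '/instance_info/%s' % key, 'op': 'add',
                      'value': str(partitions.get(key, 0))})
    # Partition images are booted locally, hence the boot_option capability.
    patch.append({'path': '/instance_info/capabilities',
                  'op': 'add', 'value': '{"boot_option": "local"}'})

for entry in patch:
    print(entry)
# {'path': '/instance_info/root_gb', 'op': 'add', 'value': '100'}
# {'path': '/instance_info/ephemeral_gb', 'op': 'add', 'value': '400'}
# {'path': '/instance_info/swap_mb', 'op': 'add', 'value': '40960'}
# {'path': '/instance_info/capabilities', 'op': 'add', 'value': '{"boot_option": "local"}'}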
@@ -247,12 +256,13 @@ class IronicDriver(base_driver.BaseEngineDriver):
                       "VIF %(vif)s isn't attached to Ironic node %(node)s",
                       {'vif': port_id, 'node': node.uuid})

-    def spawn(self, context, server, configdrive_value):
+    def spawn(self, context, server, configdrive_value, partitions):
         """Deploy a server.

         :param context: The security context.
         :param server: The server object.
-        :param configdrive_value: The configdrive value to be injected.
+        :param configdrive_value: configdrive value to be injected.
+        :param partitions: root disk partitions.
         """
         LOG.debug('Spawn called for server', server=server)

@@ -266,7 +276,7 @@ class IronicDriver(base_driver.BaseEngineDriver):

         # add server info to node
         node = self._get_node(node_ident)
-        self._add_server_info_to_node(node, server)
+        self._add_server_info_to_node(node, server, partitions=partitions)

         # validate we are ready to do the deploy
         validate_chk = self.ironicclient.call("node.validate", node_ident)
@@ -273,6 +273,29 @@ class ImageNotFound(NotFound):
     _msg_fmt = _("Image %(image_id)s could not be found.")


+class ImageNotActive(Invalid):
+    _msg_fmt = _("Image %(image_id)s is not active.")
+
+
+class InvalidImageConfigDrive(Invalid):
+    _msg_fmt = _("Image's config drive option '%(config_drive)s' is invalid")
+
+
+class PartitionsNotSupport(Invalid):
+    _msg_fmt = _("Request of image %(image_id)s doesn't support partitions.")
+
+
+class PartitionSmallerThanImage(Invalid):
+    _msg_fmt = _("Partition is too small for requested image. Root disk "
+                 "is %(root_size)i bytes, image is %(image_size)i bytes.")
+
+
+class PartitionSmallerThanMinDisk(Invalid):
+    _msg_fmt = _("Partition is smaller than the minimum size specified in "
+                 "image metadata. Root disk is %(root_size)i bytes, "
+                 "minimum size is %(image_min_disk)i bytes.")
+
+
 class GlanceConnectionFailed(Invalid):
     _msg_fmt = _("Connection to glance host %(server)s failed: "
                  "%(reason)s")
@@ -63,7 +63,11 @@ Possible values:
     cfg.StrOpt('engine_driver',
                default='ironic.IronicDriver',
                choices=['ironic.IronicDriver'],
-               help=_("Which driver to use, default to ironic driver."))
+               help=_("Which driver to use, default to ironic driver.")),
+    cfg.IntOpt('default_root_partition',
+               default=10,
+               help=_("The default root partition size (GB) for partition "
+                      "images."))
]


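A minimal standalone sketch of registering and reading such an option with oslo.config; the 'engine' group name is an assumption taken from the CONF.engine.default_root_partition lookup used later in this change, and the script is not Mogan's opts module.

# Standalone sketch: register and read default_root_partition under 'engine'.
from oslo_config import cfg

CONF = cfg.CONF

engine_opts = [
    cfg.IntOpt('default_root_partition',
               default=10,
               help='The default root partition size (GB) for partition '
                    'images.'),
]
CONF.register_opts(engine_opts, group='engine')
CONF([], project='demo')  # initialize without reading any config file

# Consumed when a partition image is requested without explicit partitions,
# e.g. partitions = {'root_gb': CONF.engine.default_root_partition}.
print(CONF.engine.default_root_partition)  # -> 10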
@@ -76,6 +76,7 @@ def upgrade():
         sa.Column('availability_zone', sa.String(length=255), nullable=True),
         sa.Column('node', sa.String(length=255), nullable=True),
         sa.Column('extra', sa.Text(), nullable=True),
+        sa.Column('partitions', sa.Text(), nullable=True),
         sa.Column('locked', sa.Boolean(), nullable=True),
         sa.Column('affinity_zone', sa.String(length=255), nullable=True),
         sa.Column('locked_by', sa.Enum('admin', 'owner'), nullable=True),

@@ -88,6 +88,7 @@ class Server(Base):
     node = Column(String(255), nullable=True)
     launched_at = Column(DateTime, nullable=True)
     extra = Column(db_types.JsonEncodedDict)
+    partitions = Column(db_types.JsonEncodedDict)
     locked = Column(Boolean)
     locked_by = Column(Enum('owner', 'admin'))
     affinity_zone = Column(String(255), nullable=True)
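The migration adds the column as plain text while the model declares it as db_types.JsonEncodedDict, which transparently JSON-encodes a Python dict into that text column. A small standalone sketch of that behaviour (made-up table and class names, in-memory SQLite):

# Standalone sketch of oslo.db's JsonEncodedDict round-tripping a dict.
from oslo_db.sqlalchemy import types as db_types
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class DemoServer(Base):
    __tablename__ = 'demo_servers'
    id = Column(Integer, primary_key=True)
    partitions = Column(db_types.JsonEncodedDict)  # stored as JSON text


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(DemoServer(partitions={'root_gb': 100, 'swap_mb': 40960}))
session.commit()

server = session.query(DemoServer).first()
print(server.partitions['root_gb'])  # 100, decoded back into a dict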
@@ -22,6 +22,7 @@ import binascii
 from oslo_log import log
 from oslo_serialization import base64 as base64utils
 from oslo_utils import excutils
+from oslo_utils import units
 from oslo_utils import uuidutils
 import six

@@ -80,7 +81,7 @@ class API(object):
                                            image_uuid, name, description,
                                            availability_zone, metadata,
                                            requested_networks, user_data,
-                                           key_name, max_count):
+                                           key_name, max_count, partitions):
         """Verify all the input parameters"""

         if user_data:

@@ -119,6 +120,7 @@
                         'description': description,
                         'locked': False,
                         'metadata': metadata or {},
+                        'partitions': partitions or {},
                         'availability_zone': availability_zone,
                         'key_name': key_name}

@@ -256,6 +258,41 @@

         return servers

+    def _is_whole_disk_image(self, context, image):
+        """Find out if the image is a partition image or a
+        whole disk image.
+        """
+        iproperties = image.get('properties', {})
+        is_whole_disk_image = (not iproperties.get('kernel_id') and
+                               not iproperties.get('ramdisk_id'))
+        return is_whole_disk_image
+
+    def _check_requested_image(self, context, image, partitions):
+        """Check if the requested image meets the requirements"""
+        if image['status'] != 'active':
+            raise exception.ImageNotActive(image_id=image['id'])
+
+        image_properties = image.get('properties', {})
+        config_drive_option = image_properties.get(
+            'img_config_drive', 'optional')
+        if config_drive_option not in ['optional', 'mandatory']:
+            raise exception.InvalidImageConfigDrive(
+                config_drive=config_drive_option)
+
+        if partitions:
+            # Image min_disk is in gb, size is in bytes. For sanity, have
+            # them both in bytes.
+            image_min_disk = int(image.get('min_disk') or 0) * units.Gi
+            image_size = int(image.get('size') or 0)
+            dest_size = int(partitions.get('root_gb') or 0) * units.Gi
+
+            if image_size > dest_size:
+                raise exception.PartitionSmallerThanImage(
+                    root_size=dest_size, image_size=image_size)
+            if image_min_disk > dest_size:
+                raise exception.PartitionSmallerThanMinDisk(
+                    root_size=dest_size, image_min_disk=image_min_disk)
+
     def _check_requested_networks(self, context, requested_networks,
                                   max_count):
         """Check if the networks requested belongs to the project
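To make the size check concrete, here is a small standalone sketch with made-up image values; units.Gi is 1024**3, so root_gb is converted to bytes before comparing against the image size and min_disk.

# Standalone illustration of the partition size checks above.
from oslo_utils import units

image = {'size': 6 * units.Gi,   # 6 GiB image file (made up)
         'min_disk': 8}          # image metadata: needs at least 8 GB
partitions = {'root_gb': 10}

image_min_disk = int(image.get('min_disk') or 0) * units.Gi   # 8589934592
image_size = int(image.get('size') or 0)                      # 6442450944
dest_size = int(partitions.get('root_gb') or 0) * units.Gi    # 10737418240

# Both checks pass: a 10 GB root partition holds the image and satisfies
# min_disk. With root_gb=7 the min_disk check would fail instead.
assert image_size <= dest_size
assert image_min_disk <= dest_size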
@@ -278,12 +315,17 @@
     def _create_server(self, context, flavor, image_uuid,
                        name, description, availability_zone, metadata,
                        requested_networks, user_data, injected_files,
-                       key_name, min_count, max_count, scheduler_hints):
+                       key_name, min_count, max_count, partitions,
+                       scheduler_hints):
         """Verify all the input parameters"""
+        image = self._get_image(context, image_uuid)
+        iwdi = self._is_whole_disk_image(context, image)
+        if iwdi and partitions:
+            raise exception.PartitionsNotSupport(image_id=image['id'])
+        if (not iwdi) and (not partitions):
+            partitions = {'root_gb': CONF.engine.default_root_partition}

-        # Verify the specified image exists
-        if image_uuid:
-            self._get_image(context, image_uuid)
+        self._check_requested_image(context, image, partitions)

         if not availability_zone:
             availability_zone = CONF.engine.default_schedule_zone
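The whole-disk/partition decision hinges on the Glance image properties: an image carrying kernel_id and ramdisk_id is a partition image, otherwise it is a whole-disk image. A tiny standalone sketch with made-up image records:

# Illustration of the _is_whole_disk_image() logic above.
def is_whole_disk_image(image):
    iproperties = image.get('properties', {})
    return (not iproperties.get('kernel_id') and
            not iproperties.get('ramdisk_id'))

whole_disk = {'id': 'img-1', 'properties': {}}
partition = {'id': 'img-2',
             'properties': {'kernel_id': 'aki-uuid', 'ramdisk_id': 'ari-uuid'}}

print(is_whole_disk_image(whole_disk))  # True  -> requesting partitions is rejected
print(is_whole_disk_image(partition))   # False -> partitions allowed; if none are
                                        # given, root_gb falls back to
                                        # CONF.engine.default_root_partition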
@@ -292,7 +334,7 @@
             self._validate_and_build_base_options(
                 context, flavor, image_uuid, name, description,
                 availability_zone, metadata, requested_networks, user_data,
-                key_name, max_count)
+                key_name, max_count, partitions)

         # max_net_count is the maximum number of servers requested by the
         # user adjusted for any network quota constraints, including

@@ -328,6 +370,7 @@
             user_data,
             decoded_files,
             key_pair,
+            partitions,
             request_spec,
             filter_properties=None)
         return servers
@@ -336,7 +379,7 @@
                name=None, description=None, availability_zone=None,
                metadata=None, requested_networks=None, user_data=None,
                injected_files=None, key_name=None, min_count=None,
-               max_count=None, scheduler_hints=None):
+               max_count=None, partitions=None, scheduler_hints=None):
         """Provision servers

         Sending server information to the engine and will handle

@@ -354,7 +397,8 @@
                                    availability_zone, metadata,
                                    requested_networks, user_data,
                                    injected_files, key_name,
-                                   min_count, max_count, scheduler_hints)
+                                   min_count, max_count, partitions,
+                                   scheduler_hints)

     def _delete_server(self, context, server):

@@ -49,7 +49,7 @@ class OnFailureRescheduleTask(flow_utils.MoganTask):
     def __init__(self, engine_rpcapi):
         requires = ['filter_properties', 'request_spec', 'server',
                     'requested_networks', 'user_data', 'injected_files',
-                    'key_pair', 'context']
+                    'key_pair', 'partitions', 'context']
         super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
                                                       requires=requires)
         self.engine_rpcapi = engine_rpcapi

@@ -68,7 +68,7 @@ class OnFailureRescheduleTask(flow_utils.MoganTask):

     def _reschedule(self, context, cause, request_spec, filter_properties,
                     server, requested_networks, user_data, injected_files,
-                    key_pair):
+                    key_pair, partitions):
         """Actions that happen during the rescheduling attempt occur here."""

         create_server = self.engine_rpcapi.schedule_and_create_servers

@@ -96,6 +96,7 @@ class OnFailureRescheduleTask(flow_utils.MoganTask):
                       user_data=user_data,
                       injected_files=injected_files,
                       key_pair=key_pair,
+                      partitions=partitions,
                       request_spec=request_spec,
                       filter_properties=filter_properties)

@@ -262,9 +263,9 @@ class CreateServerTask(flow_utils.MoganTask):
                                                requires=requires)
         self.driver = driver

-    def execute(self, context, server, configdrive):
+    def execute(self, context, server, configdrive, partitions):
         configdrive_value = configdrive.get('value')
-        self.driver.spawn(context, server, configdrive_value)
+        self.driver.spawn(context, server, configdrive_value, partitions)
         LOG.info('Successfully provisioned Ironic node %s',
                  server.node)

@@ -275,7 +276,7 @@


 def get_flow(context, manager, server, requested_networks, user_data,
-             injected_files, key_pair, request_spec,
+             injected_files, key_pair, partitions, request_spec,
              filter_properties):

     """Constructs and returns the manager entrypoint flow

@@ -303,6 +304,7 @@ def get_flow(context, manager, server, requested_networks, user_data,
         'user_data': user_data,
         'injected_files': injected_files,
         'key_pair': key_pair,
+        'partitions': partitions,
         'configdrive': {}
     }

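The flow store keys above are injected into each task's execute() by parameter name, which is why adding 'partitions' to both the store and the task signatures is enough to thread it through to the driver. A minimal, self-contained taskflow sketch (toy task and flow names, not Mogan's) showing that mechanism:

# Toy illustration of taskflow injecting store keys into execute() by name.
from taskflow import engines, task
from taskflow.patterns import linear_flow


class FakeCreateServerTask(task.Task):
    def execute(self, server, configdrive, partitions):
        # 'server', 'configdrive' and 'partitions' arrive from the store.
        print('spawning %s with partitions %s' % (server, partitions))


flow = linear_flow.Flow('create_server_demo')
flow.add(FakeCreateServerTask())

engines.run(flow, store={
    'server': 'server-1',
    'configdrive': {'value': None},
    'partitions': {'root_gb': 100, 'ephemeral_gb': 400, 'swap_mb': 40960},
})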
@@ -304,6 +304,7 @@ class EngineManager(base_manager.BaseEngineManager):
                                     user_data,
                                     injected_files,
                                     key_pair,
+                                    partitions,
                                     request_spec=None,
                                     filter_properties=None):

@@ -357,13 +358,14 @@ class EngineManager(base_manager.BaseEngineManager):
                 user_data,
                 injected_files,
                 key_pair,
+                partitions,
                 request_spec,
                 filter_properties)

     @wrap_server_fault
     def _create_server(self, context, server, requested_networks,
-                       user_data, injected_files, key_pair, request_spec=None,
-                       filter_properties=None):
+                       user_data, injected_files, key_pair, partitions,
+                       request_spec=None, filter_properties=None):
         """Perform a deployment."""
         LOG.debug("Creating server: %s", server)
         notifications.notify_about_server_action(

@@ -383,6 +385,7 @@ class EngineManager(base_manager.BaseEngineManager):
             user_data,
             injected_files,
             key_pair,
+            partitions,
             request_spec,
             filter_properties,
         )
@@ -51,7 +51,8 @@ class EngineAPI(object):

     def schedule_and_create_servers(self, context, servers, requested_networks,
                                     user_data, injected_files, key_pair,
-                                    request_spec, filter_properties):
+                                    partitions, request_spec,
+                                    filter_properties):
         """Signal to engine service to perform a deployment."""
         cctxt = self.client.prepare(topic=self.topic, server=CONF.host)
         cctxt.cast(context, 'schedule_and_create_servers', servers=servers,

@@ -59,6 +60,7 @@ class EngineAPI(object):
                    user_data=user_data,
                    injected_files=injected_files,
                    key_pair=key_pair,
+                   partitions=partitions,
                    request_spec=request_spec,
                    filter_properties=filter_properties)

@@ -52,6 +52,7 @@ class Server(base.MoganObject, object_base.VersionedObjectDictCompat):
         'node': object_fields.StringField(nullable=True),
         'launched_at': object_fields.DateTimeField(nullable=True),
         'metadata': object_fields.FlexibleDictField(nullable=True),
+        'partitions': object_fields.FlexibleDictField(nullable=True),
         'locked': object_fields.BooleanField(default=False),
         'locked_by': object_fields.StringField(nullable=True),
         'affinity_zone': object_fields.StringField(nullable=True),
@@ -42,7 +42,7 @@ def _get_fake_image(**kwargs):
              u'protected': False,
              u'schema': u'/v2/schemas/image'}
     attrs.update(kwargs)
-    return type('Image', (object,), attrs)
+    return attrs


 class TestServers(v1_test.APITestV1):
@@ -57,6 +57,7 @@ def get_test_server(**kw):
         'node': kw.get('node', 'node-0'),
         'launched_at': kw.get('launched_at'),
         'extra': kw.get('extra', {}),
+        'partitions': kw.get('partitions', {}),
         'updated_at': kw.get('updated_at'),
         'created_at': kw.get('created_at'),
         'locked': kw.get('locked', False),
@@ -55,6 +55,6 @@ class CreateServerFlowTestCase(base.TestCase):
         server_obj = obj_utils.get_test_server(self.ctxt)
         mock_spawn.side_effect = None

-        task.execute(self.ctxt, server_obj, {'value': 'configdrive'})
+        task.execute(self.ctxt, server_obj, {'value': 'configdrive'}, None)
         mock_spawn.assert_called_once_with(
-            self.ctxt, server_obj, 'configdrive')
+            self.ctxt, server_obj, 'configdrive', None)
@@ -60,7 +60,8 @@ class ComputeAPIUnitTest(base.DbTestCase):
             requested_networks=None,
             user_data=None,
             key_name=None,
-            max_count=2)
+            max_count=2,
+            partitions={'root_gb': 100})

         self.assertEqual('fake-user', base_opts['user_id'])
         self.assertEqual('fake-project', base_opts['project_id'])

@@ -132,7 +133,7 @@ class ComputeAPIUnitTest(base.DbTestCase):
         min_count = 1
         max_count = 2
         mock_validate.return_value = (base_options, max_count, None)
-        mock_get_image.side_effect = None
+        mock_get_image.return_value = {'status': 'active'}
         mock_create.return_value = mock.MagicMock()
         mock_list_az.return_value = {'availability_zones': ['test_az']}
         mock_select_dest.return_value = \

@@ -160,7 +161,7 @@ class ComputeAPIUnitTest(base.DbTestCase):
         mock_validate.assert_called_once_with(
             self.context, flavor, 'fake-uuid', 'fake-name',
             'fake-descritpion', 'test_az', {'k1', 'v1'}, requested_networks,
-            None, None, max_count)
+            None, None, max_count, None)
         self.assertTrue(mock_create.called)
         self.assertTrue(mock_get_image.called)
         res = self.dbapi._get_quota_usages(self.context, self.project_id)

@@ -205,7 +206,7 @@ class ComputeAPIUnitTest(base.DbTestCase):
         min_count = 11
         max_count = 20
         mock_validate.return_value = (base_options, max_count, None)
-        mock_get_image.side_effect = None
+        mock_get_image.return_value = {'status': 'active'}
         mock_list_az.return_value = {'availability_zones': ['test_az']}
         requested_networks = [{'uuid': 'fake'}]

@@ -110,6 +110,7 @@ class RPCAPITestCase(base.DbTestCase):
                           user_data=None,
                           injected_files=None,
                           key_pair=None,
+                          partitions=None,
                           request_spec=None,
                           filter_properties=None)

@@ -382,7 +382,7 @@ class _TestObject(object):
     # version bump. It is md5 hash of object fields and remotable methods.
     # The fingerprint values should only be changed if there is a version bump.
     expected_object_fingerprints = {
-        'Server': '1.0-75e6db8272082cd1adf3b74a28ecc9c1',
+        'Server': '1.0-9063f3b9d00d4635c4d1f53183da848b',
         'ServerFault': '1.0-74349ff701259e4834b4e9dc2dac1b12',
         'ServerFaultList': '1.0-43e8aad0258652921f929934e9e048fd',
         'Flavor': '1.0-9f7166aa387d89ec40cd699019d0c9a9',