Merge "Persist the request spec during an instance boot"
commit df0fca62cf
@@ -906,7 +906,7 @@ class API(base.Base):
     def _provision_instances(self, context, instance_type, min_count,
             max_count, base_options, boot_meta, security_groups,
             block_device_mapping, shutdown_terminate,
-            instance_group, check_server_group_quota):
+            instance_group, check_server_group_quota, filter_properties):
         # Reserve quotas
         num_instances, quotas = self._check_num_instances_quota(
                 context, instance_type, min_count, max_count)
@@ -914,7 +914,18 @@ class API(base.Base):
         instances = []
         try:
             for i in range(num_instances):
+                # Create a uuid for the instance so we can store the
+                # RequestSpec before the instance is created.
+                instance_uuid = str(uuid.uuid4())
+                # Store the RequestSpec that will be used for scheduling.
+                req_spec = objects.RequestSpec.from_components(context,
+                        instance_uuid, boot_meta, instance_type,
+                        base_options['numa_topology'],
+                        base_options['pci_requests'], filter_properties,
+                        instance_group, base_options['availability_zone'])
+                req_spec.create()
                 instance = objects.Instance(context=context)
+                instance.uuid = instance_uuid
                 instance.update(base_options)
                 instance = self.create_db_entry_for_new_instance(
                         context, instance_type, boot_meta, instance,
@@ -1096,7 +1107,7 @@ class API(base.Base):
         instances = self._provision_instances(context, instance_type,
                 min_count, max_count, base_options, boot_meta, security_groups,
                 block_device_mapping, shutdown_terminate,
-                instance_group, check_server_group_quota)
+                instance_group, check_server_group_quota, filter_properties)

         for instance in instances:
             self._record_action_start(context, instance,
@@ -1308,11 +1319,6 @@ class API(base.Base):
                                          index, security_groups, instance_type):
         """Build the beginning of a new instance."""

-        if not instance.obj_attr_is_set('uuid'):
-            # Generate the instance_uuid here so we can use it
-            # for additional setup before creating the DB entry.
-            instance.uuid = str(uuid.uuid4())
-
         instance.launch_index = index
         instance.vm_state = vm_states.BUILDING
         instance.task_state = task_states.SCHEDULING

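A condensed sketch of the new boot-time flow above, for readers skimming the diff (illustrative only, not part of the commit): provision_one is a made-up wrapper name and the argument plumbing is simplified, but the objects.RequestSpec and objects.Instance calls mirror the hunks in _provision_instances.

    # Sketch only: assumes a nova environment where these objects are importable.
    import uuid

    from nova import objects


    def provision_one(context, flavor, boot_meta, base_options,
                      filter_properties, instance_group=None):
        # Generate the uuid up front so the RequestSpec can be persisted
        # before the instance record exists.
        instance_uuid = str(uuid.uuid4())
        req_spec = objects.RequestSpec.from_components(
            context, instance_uuid, boot_meta, flavor,
            base_options['numa_topology'], base_options['pci_requests'],
            filter_properties, instance_group,
            base_options['availability_zone'])
        req_spec.create()  # persisted in the nova_api database

        instance = objects.Instance(context=context)
        instance.uuid = instance_uuid  # the same uuid links instance and spec
        instance.update(base_options)
        return instance
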
@@ -184,6 +184,13 @@ class RequestSpec(base.NovaObject):
     def from_primitives(cls, context, request_spec, filter_properties):
         """Returns a new RequestSpec object by hydrating it from legacy dicts.

+        Deprecated. A RequestSpec object is created early in the boot process
+        using the from_components method. That object will either be passed to
+        places that require it, or it can be looked up with
+        get_by_instance_uuid. This method can be removed when there are no
+        longer any callers. Because the method is not remotable it is not tied
+        to object versioning.
+
         That helper is not intended to leave the legacy dicts kept in the nova
         codebase, but is rather just for giving a temporary solution for
         populating the Spec object until we get rid of scheduler_utils'
@@ -318,6 +325,45 @@ class RequestSpec(base.NovaObject):
                 hint) for hint in self.scheduler_hints}
         return filt_props

+    @classmethod
+    def from_components(cls, context, instance_uuid, image, flavor,
+            numa_topology, pci_requests, filter_properties, instance_group,
+            availability_zone):
+        """Returns a new RequestSpec object hydrated by various components.
+
+        This helper is useful in creating the RequestSpec from the various
+        objects that are assembled early in the boot process. This method
+        creates a complete RequestSpec object with all properties set or
+        intentionally left blank.
+
+        :param context: a context object
+        :param instance_uuid: the uuid of the instance to schedule
+        :param image: a dict of properties for an image or volume
+        :param flavor: a flavor NovaObject
+        :param numa_topology: InstanceNUMATopology or None
+        :param pci_requests: InstancePCIRequests
+        :param filter_properties: a dict of properties for scheduling
+        :param instance_group: None or an instance group NovaObject
+        :param availability_zone: an availability_zone string
+        """
+        spec_obj = cls(context)
+        spec_obj.num_instances = 1
+        spec_obj.instance_uuid = instance_uuid
+        spec_obj.instance_group = instance_group
+        spec_obj.project_id = context.project_id
+        spec_obj._image_meta_from_image(image)
+        spec_obj._from_flavor(flavor)
+        spec_obj._from_instance_pci_requests(pci_requests)
+        spec_obj._from_instance_numa_topology(numa_topology)
+        spec_obj.ignore_hosts = filter_properties.get('ignore_hosts')
+        spec_obj.force_hosts = filter_properties.get('force_hosts')
+        spec_obj.force_nodes = filter_properties.get('force_nodes')
+        spec_obj._from_retry(filter_properties.get('retry', {}))
+        spec_obj._from_limits(filter_properties.get('limits', {}))
+        spec_obj._from_hints(filter_properties.get('scheduler_hints', {}))
+        spec_obj.availability_zone = availability_zone
+        return spec_obj
+
     @staticmethod
     def _from_db_object(context, spec, db_spec):
         spec = spec.obj_from_primitive(jsonutils.loads(db_spec['spec']))

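As the from_primitives docstring notes, a spec created this way can later be looked up by instance uuid. A minimal usage sketch follows (illustrative, not part of the commit; the exact signature of get_by_instance_uuid is an assumption, and the empty filter_properties is only for brevity):

    # Sketch only: build a spec from boot-time components, persist it, and
    # fetch it again later (e.g. on a reschedule).
    from nova import objects


    def persist_and_reload_spec(context, instance_uuid, image, flavor,
                                numa_topology, pci_requests):
        spec = objects.RequestSpec.from_components(
            context, instance_uuid, image, flavor, numa_topology,
            pci_requests, filter_properties={}, instance_group=None,
            availability_zone=None)
        spec.create()
        # Assumed lookup call, as referenced in the from_primitives docstring.
        return objects.RequestSpec.get_by_instance_uuid(context, instance_uuid)
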
@@ -210,6 +210,7 @@ class TestCase(testtools.TestCase):

         if self.USES_DB:
             self.useFixture(nova_fixtures.Database())
+            self.useFixture(nova_fixtures.Database(database='api'))

         # NOTE(blk-u): WarningsFixture must be after the Database fixture
         # because sqlalchemy-migrate messes with the warnings filters.

@@ -1949,6 +1949,7 @@ class ServersControllerCreateTest(test.TestCase):
                        server_update_and_get_original)
         self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
                        fake_method)
+        self.stub_out('nova.objects.RequestSpec.create', fake_method)
         self.body = {
             'server': {
                 'min_count': 2,

@@ -139,6 +139,7 @@ class MultiCreateExtensionTestV21(test.TestCase):
                        server_update)
         self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
                        fake_method)
+        self.stub_out('nova.objects.RequestSpec.create', fake_method)
         self.req = fakes.HTTPRequest.blank('')

     def _test_create_extra(self, params, no_image=False,

@@ -2392,6 +2392,7 @@ class ServersControllerCreateTest(test.TestCase):
                        server_update_and_get_original)
         self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
                        fake_method)
+        self.stub_out('nova.objects.RequestSpec.create', fake_method)
         self.body = {
             'server': {
                 'name': 'server_test',

@@ -255,6 +255,12 @@ class BaseTestCase(test.TestCase):
                         fake_allocate_for_instance)
         self.compute_api = compute.API()

+        def fake_spec_create(*args, **kwargs):
+            pass
+
+        # Tests in this module do not depend on this running.
+        self.stub_out('nova.objects.RequestSpec.create', fake_spec_create)
+
         # Just to make long lines short
         self.rt = self.compute._get_resource_tracker(NODENAME)

@@ -7753,7 +7759,8 @@ class ComputeAPITestCase(BaseTestCase):

     def test_populate_instance_for_create(self):
         base_options = {'image_ref': self.fake_image['id'],
-                        'system_metadata': {'fake': 'value'}}
+                        'system_metadata': {'fake': 'value'},
+                        'uuid': uuids.instance}
         instance = objects.Instance()
         instance.update(base_options)
         inst_type = flavors.get_flavor_by_name("m1.tiny")

@@ -2803,6 +2803,68 @@ class _ComputeAPIUnitTestMixIn(object):
         self._test_create_db_entry_for_new_instance_with_cinder_error(
             expected_exception=exception.InvalidVolume)

+    def test_provision_instances_creates_request_spec(self):
+        @mock.patch.object(self.compute_api, '_check_num_instances_quota')
+        @mock.patch.object(objects.Instance, 'create')
+        @mock.patch.object(self.compute_api.security_group_api,
+                           'ensure_default')
+        @mock.patch.object(self.compute_api, '_validate_bdm')
+        @mock.patch.object(self.compute_api, '_create_block_device_mapping')
+        @mock.patch.object(objects.RequestSpec, 'from_components')
+        def do_test(mock_from_components, _mock_create_bdm, _mock_validate_bdm,
+                _mock_ensure_default, _mock_create, mock_check_num_inst_quota):
+            quota_mock = mock.MagicMock()
+            req_spec_mock = mock.MagicMock()
+
+            mock_check_num_inst_quota.return_value = (1, quota_mock)
+            mock_from_components.return_value = req_spec_mock
+
+            ctxt = context.RequestContext('fake-user', 'fake-project')
+            flavor = self._create_flavor()
+            min_count = max_count = 1
+            boot_meta = {
+                'id': 'fake-image-id',
+                'properties': {'mappings': []},
+                'status': 'fake-status',
+                'location': 'far-away'}
+            base_options = {'image_ref': 'fake-ref',
+                            'display_name': 'fake-name',
+                            'project_id': 'fake-project',
+                            'availability_zone': None,
+                            'numa_topology': None,
+                            'pci_requests': None}
+            security_groups = {}
+            block_device_mapping = [objects.BlockDeviceMapping(
+                    **fake_block_device.FakeDbBlockDeviceDict(
+                    {
+                     'id': 1,
+                     'volume_id': 1,
+                     'source_type': 'volume',
+                     'destination_type': 'volume',
+                     'device_name': 'vda',
+                     'boot_index': 0,
+                     }))]
+            shutdown_terminate = True
+            instance_group = None
+            check_server_group_quota = False
+            filter_properties = {'scheduler_hints': None,
+                                 'instance_type': flavor}
+
+            instances = self.compute_api._provision_instances(ctxt, flavor,
+                    min_count, max_count, base_options, boot_meta,
+                    security_groups, block_device_mapping, shutdown_terminate,
+                    instance_group, check_server_group_quota,
+                    filter_properties)
+            self.assertTrue(uuidutils.is_uuid_like(instances[0].uuid))
+
+            mock_from_components.assert_called_once_with(ctxt, mock.ANY,
+                    boot_meta, flavor, base_options['numa_topology'],
+                    base_options['pci_requests'], filter_properties,
+                    instance_group, base_options['availability_zone'])
+            req_spec_mock.create.assert_called_once_with()
+
+        do_test()
+
     def _test_rescue(self, vm_state=vm_states.ACTIVE, rescue_password=None,
                      rescue_image=None, clean_shutdown=True):
         instance = self._create_instance_obj(params={'vm_state': vm_state})

@@ -21,6 +21,8 @@ from nova import exception
 from nova import objects
 from nova.objects import base
 from nova.objects import request_spec
+from nova.tests.unit import fake_flavor
+from nova.tests.unit import fake_instance
 from nova.tests.unit import fake_request_spec
 from nova.tests.unit.objects import test_objects

@@ -293,6 +295,25 @@ class _TestRequestSpecObject(object):
         # just making sure that the context is set by the method
         self.assertEqual(ctxt, spec._context)

+    def test_from_components(self):
+        ctxt = context.RequestContext('fake-user', 'fake-project')
+        instance = fake_instance.fake_instance_obj(ctxt)
+        image = {'id': 'fake-image-id', 'properties': {'mappings': []},
+                 'status': 'fake-status', 'location': 'far-away'}
+        flavor = fake_flavor.fake_flavor_obj(ctxt)
+        filter_properties = {}
+        instance_group = None
+
+        spec = objects.RequestSpec.from_components(ctxt, instance, image,
+                flavor, instance.numa_topology, instance.pci_requests,
+                filter_properties, instance_group, instance.availability_zone)
+        # Make sure that all fields are set using that helper method
+        for field in [f for f in spec.obj_fields if f != 'id']:
+            self.assertEqual(True, spec.obj_attr_is_set(field),
+                             'Field: %s is not set' % field)
+        # just making sure that the context is set by the method
+        self.assertEqual(ctxt, spec._context)
+
     def test_get_scheduler_hint(self):
         spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
                                                         'foo_mul': ['1', '2']})

releasenotes/notes/request-spec-api-db-b9cc6e0624d563c5.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
+---
+upgrade:
+  - |
+    The commit with change-id Idd4bbbe8eea68b9e538fa1567efd304e9115a02a
+    requires that the nova_api database is set up and Nova is configured to
+    use it. Instructions on doing that are provided below.
+
+    Nova now requires that two databases are available and configured. The
+    existing nova database needs no changes, but a new nova_api database needs
+    to be set up. It is configured and managed very similarly to the nova
+    database. A new connection string configuration option is available in the
+    api_database group. An example::
+
+        [api_database]
+        connection = mysql+pymysql://user:secret@127.0.0.1/nova_api?charset=utf8
+
+    And a new nova-manage command has been added to manage db migrations for
+    this database. "nova-manage api_db sync" and "nova-manage api_db version"
+    are available and function like the parallel "nova-manage db ..." versions.
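For quick reference, the two nova-manage commands named in the note are run as shown below (illustrative invocation only; output will vary by deployment):

    nova-manage api_db sync
    nova-manage api_db version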