Merge "Remove unused methods"
commit 2b2a5cedfc
@@ -78,14 +78,6 @@ def _no_ironic_proxy(cmd):
 class BareMetalNodeController(wsgi.Controller):
     """The Bare-Metal Node API controller for the OpenStack API."""
 
-    def _node_dict(self, node_ref):
-        d = {}
-        for f in node_fields:
-            d[f] = node_ref.get(f)
-        for f in node_ext_fields:
-            d[f] = node_ref.get(f)
-        return d
-
     @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
     @wsgi.expected_errors((404, 501))
     def index(self, req):
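Note: the removed _node_dict helper only copied whitelisted fields from a node reference into a plain dict. A standalone sketch of the same idea, assuming node_fields and node_ext_fields are simple iterables of field names rather than the Nova module globals:

    # Sketch only: node_ref is any mapping with .get(); the field lists are plain iterables.
    def node_dict(node_ref, node_fields, node_ext_fields):
        # Copy every whitelisted field, defaulting to None when a field is absent.
        return {f: node_ref.get(f) for f in list(node_fields) + list(node_ext_fields)}

    print(node_dict({'id': 1, 'cpus': 4}, ['id', 'cpus'], ['extra']))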
@@ -852,13 +852,6 @@ EXTENSION_LIST_LEGACY_V2_COMPATIBLE = sorted(
 
 class ExtensionInfoController(wsgi.Controller):
 
-    def _add_vif_extension(self, all_extensions):
-        vif_extension_info = {'name': 'ExtendedVIFNet',
-                              'alias': 'OS-EXT-VIF-NET',
-                              'description': 'Adds network id parameter'
-                                             ' to the virtual interface list.'}
-        all_extensions.append(vif_extension_info)
-
     @wsgi.expected_errors(())
     def index(self, req):
         context = req.environ['nova.context']
@@ -20,7 +20,6 @@
 networking and storage of VMs, and compute hosts on which they run)."""
 
 import collections
-import copy
 import functools
 import re
 import string
@@ -1468,20 +1467,6 @@ class API(base.Base):
             bdm.instance_uuid = instance.uuid
         return instance_block_device_mapping
 
-    def _create_block_device_mapping(self, block_device_mapping):
-        # Copy the block_device_mapping because this method can be called
-        # multiple times when more than one instance is booted in a single
-        # request. This avoids 'id' being set and triggering the object dupe
-        # detection
-        db_block_device_mapping = copy.deepcopy(block_device_mapping)
-        # Create the BlockDeviceMapping objects in the db.
-        for bdm in db_block_device_mapping:
-            # TODO(alaski): Why is this done?
-            if bdm.volume_size == 0:
-                continue
-
-            bdm.update_or_create()
-
     @staticmethod
     def _check_requested_volume_type(bdm, volume_type_id_or_name,
                                      volume_types):
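Note: the comment in the removed _create_block_device_mapping explains the deepcopy: persisting a mapping sets its 'id', and reusing the same list for the next instance in a multi-instance boot would then trip the object duplicate detection. A minimal, self-contained illustration of that pattern with plain dicts standing in for BlockDeviceMapping objects:

    import copy

    def create_for_instance(template_bdms):
        # Work on an independent copy so the caller's template stays pristine.
        db_bdms = copy.deepcopy(template_bdms)
        for i, bdm in enumerate(db_bdms, start=1):
            bdm['id'] = i  # simulated side effect of persisting to the database
        return db_bdms

    template = [{'boot_index': 0, 'volume_size': 10}]
    create_for_instance(template)
    create_for_instance(template)  # second instance reuses the same template
    assert all('id' not in bdm for bdm in template)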
@@ -19,7 +19,6 @@ from oslo_serialization import jsonutils
 from oslo_utils import versionutils
 
 from nova import exception
-from nova import objects
 from nova.objects import base as obj_base
 from nova.objects import fields
 
@@ -256,31 +255,6 @@ class LibvirtLiveMigrateData(LiveMigrateData):
         if target_version < (1, 1) and 'target_connect_addr' in primitive:
             del primitive['target_connect_addr']
 
-    def _bdms_to_legacy(self, legacy):
-        if not self.obj_attr_is_set('bdms'):
-            return
-        legacy['volume'] = {}
-        for bdmi in self.bdms:
-            legacy['volume'][bdmi.serial] = {
-                'disk_info': bdmi.as_disk_info(),
-                'connection_info': bdmi.connection_info}
-
-    def _bdms_from_legacy(self, legacy_pre_result):
-        self.bdms = []
-        volume = legacy_pre_result.get('volume', {})
-        for serial in volume:
-            vol = volume[serial]
-            bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial)
-            bdmi.connection_info = vol['connection_info']
-            bdmi.bus = vol['disk_info']['bus']
-            bdmi.dev = vol['disk_info']['dev']
-            bdmi.type = vol['disk_info']['type']
-            if 'format' in vol:
-                bdmi.format = vol['disk_info']['format']
-            if 'boot_index' in vol:
-                bdmi.boot_index = int(vol['disk_info']['boot_index'])
-            self.bdms.append(bdmi)
-
     def is_on_shared_storage(self):
         return self.is_shared_block_storage or self.is_shared_instance_path
 
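Note: the removed _bdms_to_legacy / _bdms_from_legacy pair translated between LibvirtLiveMigrateBDMInfo objects and the legacy per-serial 'volume' dict. A rough sketch of that legacy layout and the reverse conversion, using plain dicts in place of the Nova objects:

    # Sketch only; values are illustrative, not real connection data.
    legacy = {'volume': {
        'serial-1': {'connection_info': {'driver_volume_type': 'iscsi'},
                     'disk_info': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'}}}}

    bdms = []
    for serial, vol in legacy['volume'].items():
        bdms.append({'serial': serial,
                     'connection_info': vol['connection_info'],
                     'bus': vol['disk_info']['bus'],
                     'dev': vol['disk_info']['dev'],
                     'type': vol['disk_info']['type']})

    assert bdms[0]['dev'] == 'vdb'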
@@ -4391,11 +4391,10 @@ class _ComputeAPIUnitTestMixIn(object):
         @mock.patch.object(objects.Instance, 'create')
         @mock.patch.object(self.compute_api.security_group_api,
                            'ensure_default')
-        @mock.patch.object(self.compute_api, '_create_block_device_mapping')
         @mock.patch.object(objects.RequestSpec, 'from_components')
         def do_test(
-                mock_req_spec_from_components, _mock_create_bdm,
-                _mock_ensure_default, _mock_create, mock_check_num_inst_quota):
+                mock_req_spec_from_components, _mock_ensure_default,
+                _mock_create, mock_check_num_inst_quota):
             req_spec_mock = mock.MagicMock()
 
             mock_check_num_inst_quota.return_value = 1
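Note on the test changes: mock.patch decorators hand their mocks to the decorated function bottom-up, so dropping the _create_block_device_mapping patch removes exactly one positional argument and shifts the rest. A small standalone example of that ordering (not Nova code):

    from unittest import mock

    class Thing(object):
        def a(self):
            return 'real a'

        def b(self):
            return 'real b'

    @mock.patch.object(Thing, 'a')  # outermost patch -> last mock argument
    @mock.patch.object(Thing, 'b')  # innermost patch -> first mock argument
    def demo(mock_b, mock_a):
        Thing().b()
        mock_b.assert_called_once_with()  # only the inner mock saw the call
        mock_a.assert_not_called()

    demo()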
@@ -4487,8 +4486,7 @@ class _ComputeAPIUnitTestMixIn(object):
                            'create_db_entry_for_new_instance')
         @mock.patch.object(self.compute_api,
                            '_bdm_validate_set_size_and_instance')
-        @mock.patch.object(self.compute_api, '_create_block_device_mapping')
-        def do_test(mock_cbdm, mock_bdm_v, mock_cdb, mock_sg, mock_cniq):
+        def do_test(mock_bdm_v, mock_cdb, mock_sg, mock_cniq):
             mock_cniq.return_value = 1
             self.compute_api._provision_instances(self.context,
                                                   mock.sentinel.flavor,
@@ -4610,8 +4608,6 @@ class _ComputeAPIUnitTestMixIn(object):
                            'ensure_default', new=mock.MagicMock())
         @mock.patch.object(self.compute_api, '_validate_bdm',
                            new=mock.MagicMock())
-        @mock.patch.object(self.compute_api, '_create_block_device_mapping',
-                           new=mock.MagicMock())
         @mock.patch.object(objects.RequestSpec, 'from_components',
                            mock.MagicMock())
         @mock.patch('nova.objects.InstanceMapping')
@@ -4700,14 +4696,12 @@ class _ComputeAPIUnitTestMixIn(object):
         @mock.patch.object(objects, 'Instance')
         @mock.patch.object(self.compute_api.security_group_api,
                            'ensure_default')
-        @mock.patch.object(self.compute_api, '_create_block_device_mapping')
         @mock.patch.object(objects.RequestSpec, 'from_components')
         @mock.patch.object(objects, 'BuildRequest')
         @mock.patch.object(objects, 'InstanceMapping')
         def do_test(mock_inst_mapping, mock_build_req,
-                    mock_req_spec_from_components, _mock_create_bdm,
-                    _mock_ensure_default, mock_inst, mock_check_num_inst_quota,
-                    mock_create_rs_br_im):
+                    mock_req_spec_from_components, _mock_ensure_default,
+                    mock_inst, mock_check_num_inst_quota, mock_create_rs_br_im):
 
             min_count = 1
             max_count = 2
@@ -4794,8 +4788,6 @@ class _ComputeAPIUnitTestMixIn(object):
         @mock.patch('nova.compute.utils.check_num_instances_quota')
         @mock.patch.object(self.compute_api, 'security_group_api')
         @mock.patch.object(compute_api, 'objects')
-        @mock.patch.object(self.compute_api, '_create_block_device_mapping',
-                           new=mock.MagicMock())
         @mock.patch.object(self.compute_api,
                            'create_db_entry_for_new_instance',
                            new=mock.MagicMock())
@@ -22,7 +22,6 @@ from oslo_utils import uuidutils
 from oslo_vmware import exceptions as vexc
 from oslo_vmware.objects import datastore as ds_obj
 from oslo_vmware import pbm
-from oslo_vmware import vim_util as vutil
 
 from nova import exception
 from nova.network import model as network_model
@@ -1872,70 +1871,6 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
                                               parent_folder,
                                               name=child_name)
 
-    def test_get_folder_does_not_exist(self):
-        session = fake.FakeSession()
-        with mock.patch.object(session, '_call_method',
-                               return_value=None):
-            ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
-            self.assertIsNone(ret)
-            expected_invoke_api = [mock.call(vutil, 'get_object_property',
-                                             'fake-parent',
-                                             'childEntity')]
-            self.assertEqual(expected_invoke_api,
-                             session._call_method.mock_calls)
-
-    def test_get_folder_child_entry_not_folder(self):
-        child_entity = mock.Mock()
-        child_entity._type = 'NotFolder'
-        prop_val = mock.Mock()
-        prop_val.ManagedObjectReference = [child_entity]
-        session = fake.FakeSession()
-        with mock.patch.object(session, '_call_method',
-                               return_value=prop_val):
-            ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
-            self.assertIsNone(ret)
-            expected_invoke_api = [mock.call(vutil, 'get_object_property',
-                                             'fake-parent',
-                                             'childEntity')]
-            self.assertEqual(expected_invoke_api,
-                             session._call_method.mock_calls)
-
-    def test_get_folder_child_entry_not_matched(self):
-        child_entity = mock.Mock()
-        child_entity._type = 'Folder'
-        prop_val = mock.Mock()
-        prop_val.ManagedObjectReference = [child_entity]
-        session = fake.FakeSession()
-        with mock.patch.object(session, '_call_method',
-                               side_effect=[prop_val, 'fake-1-name']):
-            ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
-            self.assertIsNone(ret)
-            expected_invoke_api = [mock.call(vutil, 'get_object_property',
-                                             'fake-parent',
-                                             'childEntity'),
-                                   mock.call(vutil, 'get_object_property',
-                                             child_entity, 'name')]
-            self.assertEqual(expected_invoke_api,
-                             session._call_method.mock_calls)
-
-    def test_get_folder_child_entry_matched(self):
-        child_entity = mock.Mock()
-        child_entity._type = 'Folder'
-        prop_val = mock.Mock()
-        prop_val.ManagedObjectReference = [child_entity]
-        session = fake.FakeSession()
-        with mock.patch.object(session, '_call_method',
-                               side_effect=[prop_val, 'fake-name']):
-            ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
-            self.assertEqual(ret, child_entity)
-            expected_invoke_api = [mock.call(vutil, 'get_object_property',
-                                             'fake-parent',
-                                             'childEntity'),
-                                   mock.call(vutil, 'get_object_property',
-                                             child_entity, 'name')]
-            self.assertEqual(expected_invoke_api,
-                             session._call_method.mock_calls)
-
     def test_folder_path_ref_cache(self):
         path = 'OpenStack/Project (e2b86092bf064181ade43deb3188f8e4)'
         self.assertIsNone(vm_util.folder_ref_cache_get(path))
@@ -1633,23 +1633,6 @@ def get_swap(session, vm_ref):
     return device
 
 
-def _get_folder(session, parent_folder_ref, name):
-    # Get list of child entities for the parent folder
-    prop_val = session._call_method(vutil, 'get_object_property',
-                                    parent_folder_ref,
-                                    'childEntity')
-    if prop_val:
-        child_entities = prop_val.ManagedObjectReference
-
-        # Return if the child folder with input name is already present
-        for child_entity in child_entities:
-            if child_entity._type != 'Folder':
-                continue
-            child_entity_name = vim_util.get_entity_name(session, child_entity)
-            if child_entity_name == name:
-                return child_entity
-
-
 def create_folder(session, parent_folder_ref, name):
     """Creates a folder in vCenter
 
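Note: the removed _get_folder walked the parent's childEntity list and returned the first child of type 'Folder' whose name matched. The same lookup, sketched with plain dicts standing in for vSphere managed object references:

    def find_child_folder(child_entities, name):
        # Return the first child that is a Folder and has the requested name.
        for child in child_entities:
            if child['type'] != 'Folder':
                continue
            if child['name'] == name:
                return child
        return None

    children = [{'type': 'VirtualMachine', 'name': 'vm-1'},
                {'type': 'Folder', 'name': 'Project A'}]
    assert find_child_folder(children, 'Project A') is children[1]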
@@ -502,9 +502,6 @@ class Failure(Exception):
         except Exception:
             return "XenAPI Fake Failure: %s" % str(self.details)
 
-    def _details_map(self):
-        return {str(i): self.details[i] for i in range(len(self.details))}
-
 
 class SessionBase(object):
     """Base class for Fake Sessions."""
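Note: the removed _details_map only keyed a failure's details by their string index; a standalone equivalent using enumerate:

    def details_map(details):
        # {'0': first detail, '1': second detail, ...}
        return {str(i): detail for i, detail in enumerate(details)}

    assert details_map(['A', 'B']) == {'0': 'A', '1': 'B'}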