Change how the API gets the list of compute nodes

Instead of calling the DB API directly, we prefer to call the ComputeNode
object methods, and we change how the fields are accessed by converting
them back to primitives where needed.

Partially-Implements blueprint detach-service-from-computenode

Change-Id: Ibf9d2f58566a8fddd52fa32ee94fbb5ea9ffd3a5
This commit is contained in:
Sylvain Bauza 2015-02-09 18:36:14 +01:00
parent 57fd4d6308
commit 6933248ff1
9 changed files with 140 additions and 123 deletions

View File

@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob.exc
from nova.api.openstack import extensions
@ -63,8 +61,12 @@ class PciServerController(wsgi.Controller):
class PciHypervisorController(wsgi.Controller):
def _extend_hypervisor(self, hypervisor, compute_node):
hypervisor['%s:pci_stats' % Pci.alias] = jsonutils.loads(
compute_node['pci_stats'])
if compute_node.pci_device_pools is not None:
pci_pools = [pci_pool.to_dict()
for pci_pool in compute_node.pci_device_pools]
else:
pci_pools = []
hypervisor['%s:pci_stats' % Pci.alias] = pci_pools
@wsgi.extends
def show(self, req, resp_obj, id):

View File

@ -3442,14 +3442,14 @@ class HostAPI(base.Base):
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
return objects.ComputeNode.get_by_id(context, int(compute_id))
def compute_node_get_all(self, context):
return self.db.compute_node_get_all(context)
return objects.ComputeNodeList.get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
return objects.ComputeNodeList.get_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)

View File

@ -16,8 +16,6 @@
import copy
from oslo_serialization import jsonutils
from nova import exception
from nova.i18n import _LE
from nova.openstack.common import log as logging
@ -56,7 +54,9 @@ class PciDeviceStats(object):
def __init__(self, stats=None):
super(PciDeviceStats, self).__init__()
self.pools = jsonutils.loads(stats) if stats else []
# NOTE(sbauza): Stats are a PCIDevicePoolList object
self.pools = [pci_pool.to_dict()
for pci_pool in stats] if stats else []
self.pools.sort(self.pool_cmp)
def _equal_properties(self, dev, entry, matching_keys):

View File

@ -13,9 +13,10 @@
# under the License.
import mock
from oslo_serialization import jsonutils
from nova import db
from nova import objects
from nova.objects import pci_device_pool
from nova.tests.functional.v3 import api_sample_base
from nova.tests.functional.v3 import test_servers
@ -85,32 +86,33 @@ class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
def setUp(self):
super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
self.fake_compute_node = {"cpu_info": "?",
"current_workload": 0,
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"state": "up",
"status": "enabled",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {"host": '043b3cacf6f34c90a'
'7245151fc8ebcda',
"disabled": False,
"disabled_reason": None},
"vcpus": 1,
"vcpus_used": 0,
"service_id": 2,
"host": '043b3cacf6f34c90a7245151fc8ebcda',
"pci_stats": [
self.fake_compute_node = objects.ComputeNode(
cpu_info="?",
current_workload=0,
disk_available_least=0,
host_ip="1.1.1.1",
state="up",
status="enabled",
free_disk_gb=1028,
free_ram_mb=7680,
hypervisor_hostname="fake-mini",
hypervisor_type="fake",
hypervisor_version=1000,
id=1,
local_gb=1028,
local_gb_used=0,
memory_mb=8192,
memory_mb_used=512,
running_vms=0,
_cached_service=objects.Service(
host='043b3cacf6f34c90a7245151fc8ebcda',
disabled=False,
disabled_reason=None),
vcpus=1,
vcpus_used=0,
service_id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
pci_device_pools=pci_device_pool.from_pci_stats(
{"count": 5,
"vendor_id": "8086",
"product_id": "1520",
@ -119,14 +121,12 @@ class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
"phys_function": '[["0x0000", '
'"0x04", "0x00",'
' "0x1"]]',
"key1": "value1"}}]}
"key1": "value1"}}),)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.db.compute_node_get")
def test_pci_show(self, mock_db, mock_service):
self.fake_compute_node['pci_stats'] = jsonutils.dumps(
self.fake_compute_node['pci_stats'])
mock_db.return_value = self.fake_compute_node
@mock.patch("nova.objects.ComputeNode.get_by_id")
def test_pci_show(self, mock_obj, mock_service):
mock_obj.return_value = self.fake_compute_node
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs = {
@ -137,12 +137,9 @@ class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.db.compute_node_get_all")
def test_pci_detail(self, mock_db, mock_service):
self.fake_compute_node['pci_stats'] = jsonutils.dumps(
self.fake_compute_node['pci_stats'])
mock_db.return_value = [self.fake_compute_node]
@mock.patch("nova.objects.ComputeNodeList.get_all")
def test_pci_detail(self, mock_obj, mock_service):
mock_obj.return_value = [self.fake_compute_node]
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id

View File

@ -20,7 +20,6 @@ from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
from nova.api.openstack.compute.plugins.v3 import hypervisors \
as hypervisors_v21
from nova.api.openstack import extensions
from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack.compute.contrib import test_hypervisors
@ -28,14 +27,14 @@ from nova.tests.unit.api.openstack import fakes
def fake_compute_node_get(context, compute_id):
for hyper in test_hypervisors.TEST_HYPERS:
if hyper['id'] == compute_id:
for hyper in test_hypervisors.TEST_HYPERS_OBJ:
if hyper.id == int(compute_id):
return hyper
raise exception.ComputeHostNotFound(host=compute_id)
def fake_compute_node_get_all(context):
return test_hypervisors.TEST_HYPERS
return test_hypervisors.TEST_HYPERS_OBJ
class ExtendedHypervisorsTestV21(test.NoDBTestCase):
@ -66,8 +65,9 @@ class ExtendedHypervisorsTestV21(test.NoDBTestCase):
super(ExtendedHypervisorsTestV21, self).setUp()
self._set_up_controller()
self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
self.stubs.Set(db, 'compute_node_get',
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
fake_compute_node_get_all)
self.stubs.Set(self.controller.host_api, 'compute_node_get',
fake_compute_node_get)
def test_view_hypervisor_detail_noservers(self):

View File

@ -20,19 +20,19 @@ from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
from nova.api.openstack.compute.plugins.v3 import hypervisors \
as hypervisors_v21
from nova.api.openstack import extensions
from nova import objects
from nova import test
from nova.tests.unit.api.openstack.compute.contrib import test_hypervisors
TEST_HYPER = dict(test_hypervisors.TEST_HYPERS[0],
service=dict(id=1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
)
TEST_HYPER = test_hypervisors.TEST_HYPERS_OBJ[0].obj_clone()
TEST_HYPER._cached_service = objects.Service(id=1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova")
class HypervisorStatusTestV21(test.NoDBTestCase):
@ -55,6 +55,9 @@ class HypervisorStatusTestV21(test.NoDBTestCase):
self.assertEqual('down', result['state'])
hyper = copy.deepcopy(TEST_HYPER)
# compute.service is not considered as a ComputeNode field, we need to
# copy it manually
hyper._cached_service = TEST_HYPER._cached_service.obj_clone()
hyper['service']['disabled'] = True
result = self.controller._view_hypervisor(hyper, False)
self.assertEqual('disabled', result['status'])
@ -75,6 +78,9 @@ class HypervisorStatusTestV21(test.NoDBTestCase):
self.assertEqual('down', result['state'])
hyper = copy.deepcopy(TEST_HYPER)
# compute.service is not considered as a ComputeNode field, we need to
# copy it manually
hyper._cached_service = TEST_HYPER._cached_service.obj_clone()
hyper['service']['disabled'] = True
hyper['service']['disabled_reason'] = "fake"
result = self.controller._view_hypervisor(hyper, True)

View File

@ -16,6 +16,7 @@
import copy
import mock
import netaddr
from webob import exc
from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
@ -25,21 +26,13 @@ from nova.api.openstack import extensions
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
TEST_HYPERS = [
dict(id=1,
service_id=1,
service=dict(id=1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
host="compute1",
vcpus=4,
memory_mb=10 * 1024,
@ -56,17 +49,10 @@ TEST_HYPERS = [
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip='1.1.1.1'),
host_ip=netaddr.IPAddress('1.1.1.1')),
dict(id=2,
service_id=2,
service=dict(id=2,
host="compute2",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
host="compute2",
vcpus=4,
memory_mb=10 * 1024,
@ -83,7 +69,36 @@ TEST_HYPERS = [
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip='2.2.2.2')]
host_ip=netaddr.IPAddress('2.2.2.2'))]
TEST_SERVICES = [
dict(id=1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
dict(id=2,
host="compute2",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
]
TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct)
for hyper_dct in TEST_HYPERS]
TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})
TEST_HYPERS_OBJ[0]._cached_service = objects.Service(**TEST_SERVICES[0])
TEST_HYPERS_OBJ[1]._cached_service = objects.Service(**TEST_SERVICES[1])
TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
dict(name="inst2", uuid="uuid2", host="compute2"),
dict(name="inst3", uuid="uuid3", host="compute1"),
@ -91,16 +106,16 @@ TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
def fake_compute_node_get_all(context):
return TEST_HYPERS
return TEST_HYPERS_OBJ
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
return TEST_HYPERS
return TEST_HYPERS_OBJ
def fake_compute_node_get(context, compute_id):
for hyper in TEST_HYPERS:
if hyper['id'] == compute_id:
for hyper in TEST_HYPERS_OBJ:
if hyper.id == int(compute_id):
return hyper
raise exception.ComputeHostNotFound(host=compute_id)
@ -121,7 +136,7 @@ def fake_compute_node_statistics(context):
disk_available_least=0,
)
for hyper in TEST_HYPERS:
for hyper in TEST_HYPERS_OBJ:
for key in result:
if key == 'count':
result[key] += 1
@ -176,10 +191,12 @@ class HypervisorsTestV21(test.NoDBTestCase):
super(HypervisorsTestV21, self).setUp()
self._set_up_controller()
self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
self.stubs.Set(db, 'compute_node_search_by_hypervisor',
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
fake_compute_node_get_all)
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor)
self.stubs.Set(db, 'compute_node_get',
self.stubs.Set(self.controller.host_api, 'compute_node_get',
fake_compute_node_get)
self.stubs.Set(db, 'compute_node_statistics',
fake_compute_node_statistics)
@ -302,7 +319,8 @@ class HypervisorsTestV21(test.NoDBTestCase):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(db, 'compute_node_search_by_hypervisor',
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
@ -325,7 +343,8 @@ class HypervisorsTestV21(test.NoDBTestCase):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(db, 'compute_node_search_by_hypervisor',
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = self._get_request(True)
@ -342,7 +361,8 @@ class HypervisorsTestV21(test.NoDBTestCase):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(db, 'compute_node_search_by_hypervisor',
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = self._get_request(True)

View File

@ -13,7 +13,6 @@
# under the License.
from oslo_serialization import jsonutils
from webob import exc
from nova.api.openstack.compute.plugins.v3 import pci
@ -22,18 +21,20 @@ from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import pci_device_pool
from nova.pci import device
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_pci_device
fake_compute_node = {
'pci_stats': [{"count": 3,
"vendor_id": "8086",
"product_id": "1520",
"extra_info": {"phys_function": '[["0x0000", "0x04", '
'"0x00", "0x1"]]'}}]}
pci_stats = [{"count": 3,
"vendor_id": "8086",
"product_id": "1520",
"extra_info": {"phys_function": '[["0x0000", "0x04", '
'"0x00", "0x1"]]'}}]
fake_compute_node = objects.ComputeNode(
pci_device_pools=pci_device_pool.from_pci_stats(pci_stats))
class FakeResponse(wsgi.ResponseObject):
@ -120,8 +121,6 @@ class PciHypervisorControllerTestV21(test.NoDBTestCase):
def test_show(self):
def fake_get_db_compute_node(id):
fake_compute_node['pci_stats'] = jsonutils.dumps(
fake_compute_node['pci_stats'])
return fake_compute_node
req = fakes.HTTPRequest.blank('/os-hypervisors/1',
@ -130,15 +129,11 @@ class PciHypervisorControllerTestV21(test.NoDBTestCase):
self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
self.controller.show(req, resp, '1')
self.assertIn('os-pci:pci_stats', resp.obj['hypervisor'])
fake_compute_node['pci_stats'] = jsonutils.loads(
fake_compute_node['pci_stats'])
self.assertEqual(fake_compute_node['pci_stats'][0],
self.assertEqual(pci_stats[0],
resp.obj['hypervisor']['os-pci:pci_stats'][0])
def test_detail(self):
def fake_get_db_compute_node(id):
fake_compute_node['pci_stats'] = jsonutils.dumps(
fake_compute_node['pci_stats'])
return fake_compute_node
req = fakes.HTTPRequest.blank('/os-hypervisors/detail',
@ -146,10 +141,8 @@ class PciHypervisorControllerTestV21(test.NoDBTestCase):
resp = FakeResponse(self.fake_objs, '')
self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
self.controller.detail(req, resp)
fake_compute_node['pci_stats'] = jsonutils.loads(
fake_compute_node['pci_stats'])
self.assertIn('os-pci:pci_stats', resp.obj['hypervisors'][0])
self.assertEqual(fake_compute_node['pci_stats'][0],
self.assertEqual(pci_stats[0],
resp.obj['hypervisors'][0]['os-pci:pci_stats'][0])
@ -187,17 +180,17 @@ class PciControlletestV21(test.NoDBTestCase):
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '0')
def _fake_compute_node_get_all(self, context):
return [dict(id=1,
service_id=1,
host='fake',
cpu_info='cpu_info',
disk_available_least=100)]
return [objects.ComputeNode(id=1,
service_id=1,
host='fake',
cpu_info='cpu_info',
disk_available_least=100)]
def _fake_pci_device_get_all_by_node(self, context, node):
return [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]
def test_index(self):
self.stubs.Set(db, 'compute_node_get_all',
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
self._fake_compute_node_get_all)
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
@ -217,7 +210,7 @@ class PciControlletestV21(test.NoDBTestCase):
result['pci_devices'][i]['address'])
def test_detail(self):
self.stubs.Set(db, 'compute_node_get_all',
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
self._fake_compute_node_get_all)
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)

View File

@ -14,7 +14,6 @@
# under the License.
import mock
from oslo_serialization import jsonutils
from nova import exception
from nova import objects
@ -98,8 +97,8 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self.pci_stats.remove_device,
self.fake_dev_2)
def test_json_creat(self):
m = jsonutils.dumps(self.pci_stats)
def test_object_create(self):
m = objects.pci_device_pool.from_pci_stats(self.pci_stats.pools)
new_stats = stats.PciDeviceStats(m)
self.assertEqual(len(new_stats.pools), 3)